From cb8453ff542feefa25ce51d51bc068197e8000f0 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Nils=20Sj=C3=B6gren?=
Date: Wed, 25 Sep 2019 09:40:28 +0200
Subject: [PATCH 01/55] Add support for glue version on aws_glue_job

Fixes #9524
---
 aws/resource_aws_glue_job.go      | 13 ++++++++
 aws/resource_aws_glue_job_test.go | 53 +++++++++++++++++++++++++++++++
 2 files changed, 66 insertions(+)

diff --git a/aws/resource_aws_glue_job.go b/aws/resource_aws_glue_job.go
index 79676588819..5c9b111aef4 100644
--- a/aws/resource_aws_glue_job.go
+++ b/aws/resource_aws_glue_job.go
@@ -66,6 +66,10 @@ func resourceAwsGlueJob() *schema.Resource {
         Type: schema.TypeString,
         Optional: true,
       },
+      "glue_version": {
+        Type: schema.TypeString,
+        Optional: true,
+      },
       "execution_property": {
         Type: schema.TypeList,
         Optional: true,
@@ -155,6 +159,10 @@ func resourceAwsGlueJobCreate(d *schema.ResourceData, meta interface{}) error {
     input.Description = aws.String(v.(string))
   }
 
+  if v, ok := d.GetOk("glue_version"); ok {
+    input.GlueVersion = aws.String(v.(string))
+  }
+
   if v, ok := d.GetOk("execution_property"); ok {
     input.ExecutionProperty = expandGlueExecutionProperty(v.([]interface{}))
   }
@@ -213,6 +221,7 @@ func resourceAwsGlueJobRead(d *schema.ResourceData, meta interface{}) error {
     return fmt.Errorf("error setting default_arguments: %s", err)
   }
   d.Set("description", job.Description)
+  d.Set("glue_version", job.GlueVersion)
   if err := d.Set("execution_property", flattenGlueExecutionProperty(job.ExecutionProperty)); err != nil {
     return fmt.Errorf("error setting execution_property: %s", err)
   }
@@ -267,6 +276,10 @@ func resourceAwsGlueJobUpdate(d *schema.ResourceData, meta interface{}) error {
     jobUpdate.Description = aws.String(v.(string))
   }
 
+  if v, ok := d.GetOk("glue_version"); ok {
+    jobUpdate.GlueVersion = aws.String(v.(string))
+  }
+
   if v, ok := d.GetOk("execution_property"); ok {
     jobUpdate.ExecutionProperty = expandGlueExecutionProperty(v.([]interface{}))
   }
diff --git a/aws/resource_aws_glue_job_test.go b/aws/resource_aws_glue_job_test.go
index b9aedb3b160..b5d4d6a56c8 100644
--- a/aws/resource_aws_glue_job_test.go
+++ b/aws/resource_aws_glue_job_test.go
@@ -233,6 +233,40 @@ func TestAccAWSGlueJob_Description(t *testing.T) {
   })
 }
 
+func TestAccAWSGlueJob_GlueVersion(t *testing.T) {
+  var job glue.Job
+
+  rName := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(5))
+  resourceName := "aws_glue_job.test"
+
+  resource.ParallelTest(t, resource.TestCase{
+    PreCheck:     func() { testAccPreCheck(t) },
+    Providers:    testAccProviders,
+    CheckDestroy: testAccCheckAWSGlueJobDestroy,
+    Steps: []resource.TestStep{
+      {
+        Config: testAccAWSGlueJobConfig_GlueVersion(rName, "0.9"),
+        Check: resource.ComposeTestCheckFunc(
+          testAccCheckAWSGlueJobExists(resourceName, &job),
+          resource.TestCheckResourceAttr(resourceName, "glue_version", "0.9"),
+        ),
+      },
+      {
+        Config: testAccAWSGlueJobConfig_GlueVersion(rName, "1.0"),
+        Check: resource.ComposeTestCheckFunc(
+          testAccCheckAWSGlueJobExists(resourceName, &job),
+          resource.TestCheckResourceAttr(resourceName, "glue_version", "1.0"),
+        ),
+      },
+      {
+        ResourceName:      resourceName,
+        ImportState:       true,
+        ImportStateVerify: true,
+      },
+    },
+  })
+}
+
 func TestAccAWSGlueJob_ExecutionProperty(t *testing.T) {
   var job glue.Job
 
@@ -645,6 +679,25 @@ resource "aws_glue_job" "test" {
 `, testAccAWSGlueJobConfig_Base(rName), description, rName)
 }
 
+func testAccAWSGlueJobConfig_GlueVersion(rName, glueVersion string) string {
+  return fmt.Sprintf(`
+%s
+
+resource "aws_glue_job" "test" {
+  glue_version       = "%s"
+  name               = "%s"
+  role_arn           = "${aws_iam_role.test.arn}"
+  allocated_capacity = 10
+
+  command {
+    script_location = "testscriptlocation"
+  }
+
+  depends_on = ["aws_iam_role_policy_attachment.test"]
+}
+`, testAccAWSGlueJobConfig_Base(rName), glueVersion, rName)
+}
+
 func testAccAWSGlueJobConfig_ExecutionProperty(rName string, maxConcurrentRuns int) string {
   return fmt.Sprintf(`
 %s

From f875f4ce99e0e0cc9b16877ca2b926a7ade2b42a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Nils=20Sj=C3=B6gren?=
Date: Wed, 25 Sep 2019 09:51:28 +0200
Subject: [PATCH 02/55] Update resource aws_glue_job documentation

Add glue_version argument reference
---
 website/docs/r/glue_job.html.markdown | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/website/docs/r/glue_job.html.markdown b/website/docs/r/glue_job.html.markdown
index af6b630e81c..a1935d3890d 100644
--- a/website/docs/r/glue_job.html.markdown
+++ b/website/docs/r/glue_job.html.markdown
@@ -61,7 +61,8 @@ be removed in future releases, please use `max_capacity` instead.
 * `role_arn` – (Required) The ARN of the IAM role associated with this job.
 * `timeout` – (Optional) The job timeout in minutes. The default is 2880 minutes (48 hours).
 * `security_configuration` - (Optional) The name of the Security Configuration to be associated with the job.
-
+* `glue_version` - (Optional) The version of Glue to use, for example "1.0". For information about available versions, see [AWS Glue Release Notes](https://docs.aws.amazon.com/glue/latest/dg/release-notes.html).
+
 ### command Argument Reference
 
 * `name` - (Optional) The name of the job command. Defaults to `glueetl`

From 1385ecf9ac1bc976816562ded2cea04e96f01c13 Mon Sep 17 00:00:00 2001
From: Ben Reich
Date: Fri, 4 Oct 2019 08:56:12 +1000
Subject: [PATCH 03/55] Fixes #9939: Add support for KMS Key Id for at rest
 encryption

---
 ...ource_aws_elasticache_replication_group.go | 11 +++
 ..._aws_elasticache_replication_group_test.go | 85 +++++++++++++++++++
 ...lasticache_replication_group.html.markdown |  1 +
 3 files changed, 97 insertions(+)

diff --git a/aws/resource_aws_elasticache_replication_group.go b/aws/resource_aws_elasticache_replication_group.go
index ca461dbe480..e7721907d73 100644
--- a/aws/resource_aws_elasticache_replication_group.go
+++ b/aws/resource_aws_elasticache_replication_group.go
@@ -229,6 +229,11 @@ func resourceAwsElasticacheReplicationGroup() *schema.Resource {
         Default: false,
         ForceNew: true,
       },
+      "kms_key_id": {
+        Type: schema.TypeString,
+        ForceNew: true,
+        Optional: true,
+      },
     },
 
     SchemaVersion: 1,
@@ -306,6 +311,10 @@ func resourceAwsElasticacheReplicationGroupCreate(d *schema.ResourceData, meta i
     params.NotificationTopicArn = aws.String(v.(string))
   }
 
+  if v, ok := d.GetOk("kms_key_id"); ok {
+    params.KmsKeyId = aws.String(v.(string))
+  }
+
   if v, ok := d.GetOk("snapshot_retention_limit"); ok {
     params.SnapshotRetentionLimit = aws.Int64(int64(v.(int)))
   }
@@ -427,6 +436,8 @@ func resourceAwsElasticacheReplicationGroupRead(d *schema.ResourceData, meta int
     }
   }
 
+  d.Set("kms_key_id", rgp.KmsKeyId)
+
   d.Set("replication_group_description", rgp.Description)
   d.Set("number_cache_clusters", len(rgp.MemberClusters))
   if err := d.Set("member_clusters", flattenStringList(rgp.MemberClusters)); err != nil {
diff --git a/aws/resource_aws_elasticache_replication_group_test.go b/aws/resource_aws_elasticache_replication_group_test.go
index 4da3c656647..7830bafeafe 100644
--- a/aws/resource_aws_elasticache_replication_group_test.go
+++ b/aws/resource_aws_elasticache_replication_group_test.go
@@ -493,6 +493,24 @@ func
TestAccAWSElasticacheReplicationGroup_enableAtRestEncryption(t *testing.T) }) } +func TestAccAWSElasticacheReplicationGroup_useCmkKmsKeyId(t *testing.T) { + var rg elasticache.ReplicationGroup + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSElasticacheReplicationDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSElasticacheReplicationGroup_UseCmkKmsKeyId(acctest.RandInt(), acctest.RandString(10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.bar", &rg), + resource.TestCheckResourceAttrSet("aws_elasticache_replication_group.bar", "kms_key_id"), + ), + }, + }, + }) +} + func TestAccAWSElasticacheReplicationGroup_NumberCacheClusters(t *testing.T) { var replicationGroup elasticache.ReplicationGroup rName := acctest.RandomWithPrefix("tf-acc-test") @@ -1333,6 +1351,73 @@ resource "aws_elasticache_replication_group" "bar" { `, rName, numNodeGroups, replicasPerNodeGroup) } +func testAccAWSElasticacheReplicationGroup_UseCmkKmsKeyId(rInt int, rString string) string { + return fmt.Sprintf(` +data "aws_availability_zones" "available" { + state = "available" +} + +resource "aws_vpc" "foo" { + cidr_block = "192.168.0.0/16" + + tags = { + Name = "terraform-testacc-elasticache-replication-group-at-rest-encryption" + } +} + +resource "aws_subnet" "foo" { + vpc_id = "${aws_vpc.foo.id}" + cidr_block = "192.168.0.0/20" + availability_zone = "${data.aws_availability_zones.available.names[0]}" + + tags = { + Name = "tf-acc-elasticache-replication-group-at-rest-encryption" + } +} + +resource "aws_elasticache_subnet_group" "bar" { + name = "tf-test-cache-subnet-%03d" + description = "tf-test-cache-subnet-group-descr" + + subnet_ids = [ + "${aws_subnet.foo.id}", + ] +} + +resource "aws_security_group" "bar" { + name = "tf-test-security-group-%03d" + description = "tf-test-security-group-descr" + vpc_id = "${aws_vpc.foo.id}" + + ingress { + from_port = -1 + to_port = -1 + protocol = "icmp" + cidr_blocks = ["0.0.0.0/0"] + } +} + +resource "aws_kms_key" "bar" { + description = "tf-test-cmk-kms-key-id" +} + +resource "aws_elasticache_replication_group" "bar" { + replication_group_id = "tf-%s" + replication_group_description = "test description" + node_type = "cache.t2.micro" + number_cache_clusters = "1" + port = 6379 + subnet_group_name = "${aws_elasticache_subnet_group.bar.name}" + security_group_ids = ["${aws_security_group.bar.id}"] + parameter_group_name = "default.redis3.2" + availability_zones = ["${data.aws_availability_zones.available.names[0]}"] + engine_version = "3.2.6" + at_rest_encryption_enabled = true + kms_key_id = "${aws_kms_key.bar.arn}" +} +`, rInt, rInt, rString) +} + func testAccAWSElasticacheReplicationGroup_EnableAtRestEncryptionConfig(rInt int, rString string) string { return fmt.Sprintf(` data "aws_availability_zones" "available" { diff --git a/website/docs/r/elasticache_replication_group.html.markdown b/website/docs/r/elasticache_replication_group.html.markdown index a31e864199a..dc90ba6fdc6 100644 --- a/website/docs/r/elasticache_replication_group.html.markdown +++ b/website/docs/r/elasticache_replication_group.html.markdown @@ -108,6 +108,7 @@ The following arguments are supported: * `at_rest_encryption_enabled` - (Optional) Whether to enable encryption at rest. * `transit_encryption_enabled` - (Optional) Whether to enable encryption in transit. 
* `auth_token` - (Optional) The password used to access a password protected server. Can be specified only if `transit_encryption_enabled = true`. +* `kms_key_id` - (Optional) The ARN of the key that you wish to use if encrypting at rest. If not supplied, uses service managed encryption. Can be specified only if `at_rest_encryption_enabled = true`. * `engine_version` - (Optional) The version number of the cache engine to be used for the cache clusters in this replication group. * `parameter_group_name` - (Optional) The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used. * `port` – (Optional) The port number on which each of the cache nodes will accept connections. For Memcache the default is 11211, and for Redis the default port is 6379. From e78f27ba79d51e546955ca998e961fd54b7bf710 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 24 May 2019 18:53:54 -0400 Subject: [PATCH 04/55] Handle missing dx_gateway_association_id attribute. Add state migration for r/aws_dx_gateway_association. --- aws/resource_aws_dx_gateway_association.go | 3 ++ ...urce_aws_dx_gateway_association_migrate.go | 43 +++++++++++++++++++ 2 files changed, 46 insertions(+) create mode 100644 aws/resource_aws_dx_gateway_association_migrate.go diff --git a/aws/resource_aws_dx_gateway_association.go b/aws/resource_aws_dx_gateway_association.go index 4034c72a68c..84679da47a8 100644 --- a/aws/resource_aws_dx_gateway_association.go +++ b/aws/resource_aws_dx_gateway_association.go @@ -27,6 +27,9 @@ func resourceAwsDxGatewayAssociation() *schema.Resource { State: resourceAwsDxGatewayAssociationImport, }, + SchemaVersion: 1, + MigrateState: resourceAwsDxGatewayAssociationMigrateState, + Schema: map[string]*schema.Schema{ "allowed_prefixes": { Type: schema.TypeSet, diff --git a/aws/resource_aws_dx_gateway_association_migrate.go b/aws/resource_aws_dx_gateway_association_migrate.go new file mode 100644 index 00000000000..601a2d10ae0 --- /dev/null +++ b/aws/resource_aws_dx_gateway_association_migrate.go @@ -0,0 +1,43 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/directconnect" + "github.com/hashicorp/terraform/terraform" +) + +func resourceAwsDxGatewayAssociationMigrateState(v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { + switch v { + case 0: + log.Println("[INFO] Found Direct Connect gateway association state v0; migrating to v1") + return migrateDxGatewayAssociationStateV0toV1(is, meta) + default: + return is, fmt.Errorf("Unexpected schema version: %d", v) + } +} + +func migrateDxGatewayAssociationStateV0toV1(is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { + conn := meta.(*AWSClient).dxconn + + // dx_gateway_association_id was introduced in v2.8.0. Handle the case where it's not yet present. 
+ if _, ok := is.Attributes["dx_gateway_association_id"]; !ok { + resp, err := conn.DescribeDirectConnectGatewayAssociations(&directconnect.DescribeDirectConnectGatewayAssociationsInput{ + DirectConnectGatewayId: aws.String(is.Attributes["dx_gateway_id"]), + VirtualGatewayId: aws.String(is.Attributes["vpn_gateway_id"]), + }) + if err != nil { + return nil, err + } + + if len(resp.DirectConnectGatewayAssociations) == 0 { + return nil, fmt.Errorf("Direct Connect gateway association not found, remove from state using 'terraform state rm'") + } + + is.Attributes["dx_gateway_association_id"] = aws.StringValue(resp.DirectConnectGatewayAssociations[0].AssociationId) + } + + return is, nil +} From dee0d541db004b505b920dbc55e50fd39d976deb Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 27 May 2019 13:43:11 -0400 Subject: [PATCH 05/55] Add r/aws_dx_gateway_association state migration acceptance test. --- ...esource_aws_dx_gateway_association_test.go | 34 +++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/aws/resource_aws_dx_gateway_association_test.go b/aws/resource_aws_dx_gateway_association_test.go index c6c3af408fd..4840cf933c3 100644 --- a/aws/resource_aws_dx_gateway_association_test.go +++ b/aws/resource_aws_dx_gateway_association_test.go @@ -206,6 +206,7 @@ func TestAccAwsDxGatewayAssociation_deprecatedSingleAccount(t *testing.T) { testAccCheckResourceAttrAccountID(resourceName, "dx_gateway_owner_account_id"), resource.TestCheckResourceAttr(resourceName, "allowed_prefixes.#", "1"), resource.TestCheckResourceAttr(resourceName, "allowed_prefixes.1216997074", "10.255.255.0/28"), + testAccCheckAwsDxGatewayAssociationMigrateState(resourceName), ), }, }, @@ -522,6 +523,39 @@ func testAccCheckAwsDxGatewayAssociationExists(name string) resource.TestCheckFu } } +func testAccCheckAwsDxGatewayAssociationMigrateState(name string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return fmt.Errorf("Not found: %s", name) + } + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + is := &terraform.InstanceState{ + ID: rs.Primary.ID, + Attributes: map[string]string{ + "dx_gateway_id": rs.Primary.Attributes["dx_gateway_id"], + "vpn_gateway_id": rs.Primary.Attributes["vpn_gateway_id"], + }, + } + + is, err := resourceAwsDxGatewayAssociation().MigrateState(0, is, testAccProvider.Meta()) + if err != nil { + return err + } + + if is.Attributes["dx_gateway_association_id"] != rs.Primary.Attributes["dx_gateway_association_id"] { + return fmt.Errorf("Invalid dx_gateway_association_id attribute in migrated state. Expected %s, got %s", + rs.Primary.Attributes["dx_gateway_association_id"], + is.Attributes["dx_gateway_association_id"]) + } + + return nil + } +} + func testAccDxGatewayAssociationConfigBase_vpnGatewaySingleAccount(rName string, rBgpAsn int) string { return fmt.Sprintf(` resource "aws_dx_gateway" "test" { From 72492bebd8baed7dd9d389573a10c800c0e3cf21 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 4 Oct 2019 10:36:41 -0400 Subject: [PATCH 06/55] Terraform Plugin SDK migration. 
--- aws/resource_aws_dx_gateway_association_migrate.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/aws/resource_aws_dx_gateway_association_migrate.go b/aws/resource_aws_dx_gateway_association_migrate.go index 601a2d10ae0..80c9c12106a 100644 --- a/aws/resource_aws_dx_gateway_association_migrate.go +++ b/aws/resource_aws_dx_gateway_association_migrate.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/directconnect" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform-plugin-sdk/terraform" ) func resourceAwsDxGatewayAssociationMigrateState(v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { From bac6b23bad458e55de28a873f54deb2d9d7995cd Mon Sep 17 00:00:00 2001 From: Ryn Daniels Date: Thu, 19 Sep 2019 13:46:37 +0200 Subject: [PATCH 07/55] Import test refactor for elasticache resources --- ..._aws_elasticache_replication_group_test.go | 646 +++++++++++------- ...rce_aws_elasticache_security_group_test.go | 34 +- ...ource_aws_elasticache_subnet_group_test.go | 68 +- 3 files changed, 435 insertions(+), 313 deletions(-) diff --git a/aws/resource_aws_elasticache_replication_group_test.go b/aws/resource_aws_elasticache_replication_group_test.go index 68f1c023812..2e2984958fa 100644 --- a/aws/resource_aws_elasticache_replication_group_test.go +++ b/aws/resource_aws_elasticache_replication_group_test.go @@ -58,37 +58,14 @@ func testSweepElasticacheReplicationGroups(region string) error { return nil } -func TestAccAWSElasticacheReplicationGroup_importBasic(t *testing.T) { +func TestAccAWSElasticacheReplicationGroup_basic(t *testing.T) { oldvar := os.Getenv("AWS_DEFAULT_REGION") os.Setenv("AWS_DEFAULT_REGION", "us-east-1") defer os.Setenv("AWS_DEFAULT_REGION", oldvar) - rName := acctest.RandomWithPrefix("tf-acc-test") - - resourceName := "aws_elasticache_replication_group.bar" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSElasticacheReplicationDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSElasticacheReplicationGroupConfig(rName), - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"apply_immediately"}, //not in the API - }, - }, - }) -} - -func TestAccAWSElasticacheReplicationGroup_basic(t *testing.T) { var rg elasticache.ReplicationGroup rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_elasticache_replication_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -98,24 +75,36 @@ func TestAccAWSElasticacheReplicationGroup_basic(t *testing.T) { { Config: testAccAWSElasticacheReplicationGroupConfig(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.bar", &rg), + testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.test", &rg), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "cluster_mode.#", "0"), + "aws_elasticache_replication_group.test", "cluster_mode.#", "0"), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "number_cache_clusters", "2"), + "aws_elasticache_replication_group.test", "number_cache_clusters", "2"), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "member_clusters.#", "2"), + 
"aws_elasticache_replication_group.test", "member_clusters.#", "2"), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "auto_minor_version_upgrade", "false"), + "aws_elasticache_replication_group.test", "auto_minor_version_upgrade", "false"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"apply_immediately"}, //not in the API + }, }, }) } func TestAccAWSElasticacheReplicationGroup_Uppercase(t *testing.T) { + oldvar := os.Getenv("AWS_DEFAULT_REGION") + os.Setenv("AWS_DEFAULT_REGION", "us-east-1") + defer os.Setenv("AWS_DEFAULT_REGION", oldvar) + var rg elasticache.ReplicationGroup rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_elasticache_replication_group.test" + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -124,18 +113,30 @@ func TestAccAWSElasticacheReplicationGroup_Uppercase(t *testing.T) { { Config: testAccAWSElasticacheReplicationGroupConfig_Uppercase(strings.ToUpper(rName)), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.bar", &rg), + testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.test", &rg), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "replication_group_id", rName), + "aws_elasticache_replication_group.test", "replication_group_id", rName), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"apply_immediately"}, + }, }, }) } func TestAccAWSElasticacheReplicationGroup_updateDescription(t *testing.T) { + oldvar := os.Getenv("AWS_DEFAULT_REGION") + os.Setenv("AWS_DEFAULT_REGION", "us-east-1") + defer os.Setenv("AWS_DEFAULT_REGION", oldvar) + var rg elasticache.ReplicationGroup rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_elasticache_replication_group.test" + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -144,26 +145,31 @@ func TestAccAWSElasticacheReplicationGroup_updateDescription(t *testing.T) { { Config: testAccAWSElasticacheReplicationGroupConfig(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.bar", &rg), + testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.test", &rg), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "number_cache_clusters", "2"), + "aws_elasticache_replication_group.test", "number_cache_clusters", "2"), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "replication_group_description", "test description"), + "aws_elasticache_replication_group.test", "replication_group_description", "test description"), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "auto_minor_version_upgrade", "false"), + "aws_elasticache_replication_group.test", "auto_minor_version_upgrade", "false"), ), }, - + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"apply_immediately"}, + }, { Config: testAccAWSElasticacheReplicationGroupConfigUpdatedDescription(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.bar", &rg), + 
testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.test", &rg), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "number_cache_clusters", "2"), + "aws_elasticache_replication_group.test", "number_cache_clusters", "2"), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "replication_group_description", "updated description"), + "aws_elasticache_replication_group.test", "replication_group_description", "updated description"), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "auto_minor_version_upgrade", "true"), + "aws_elasticache_replication_group.test", "auto_minor_version_upgrade", "true"), ), }, }, @@ -171,8 +177,14 @@ func TestAccAWSElasticacheReplicationGroup_updateDescription(t *testing.T) { } func TestAccAWSElasticacheReplicationGroup_updateMaintenanceWindow(t *testing.T) { + oldvar := os.Getenv("AWS_DEFAULT_REGION") + os.Setenv("AWS_DEFAULT_REGION", "us-east-1") + defer os.Setenv("AWS_DEFAULT_REGION", oldvar) + var rg elasticache.ReplicationGroup rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_elasticache_replication_group.test" + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -181,17 +193,23 @@ func TestAccAWSElasticacheReplicationGroup_updateMaintenanceWindow(t *testing.T) { Config: testAccAWSElasticacheReplicationGroupConfig(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.bar", &rg), + testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.test", &rg), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "maintenance_window", "tue:06:30-tue:07:30"), + "aws_elasticache_replication_group.test", "maintenance_window", "tue:06:30-tue:07:30"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"apply_immediately"}, + }, { Config: testAccAWSElasticacheReplicationGroupConfigUpdatedMaintenanceWindow(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.bar", &rg), + testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.test", &rg), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "maintenance_window", "wed:03:00-wed:06:00"), + "aws_elasticache_replication_group.test", "maintenance_window", "wed:03:00-wed:06:00"), ), }, }, @@ -199,8 +217,14 @@ func TestAccAWSElasticacheReplicationGroup_updateMaintenanceWindow(t *testing.T) } func TestAccAWSElasticacheReplicationGroup_updateNodeSize(t *testing.T) { + oldvar := os.Getenv("AWS_DEFAULT_REGION") + os.Setenv("AWS_DEFAULT_REGION", "us-east-1") + defer os.Setenv("AWS_DEFAULT_REGION", oldvar) + var rg elasticache.ReplicationGroup rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_elasticache_replication_group.test" + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -209,22 +233,27 @@ func TestAccAWSElasticacheReplicationGroup_updateNodeSize(t *testing.T) { { Config: testAccAWSElasticacheReplicationGroupConfig(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.bar", &rg), + 
testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.test", &rg), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "number_cache_clusters", "2"), + "aws_elasticache_replication_group.test", "number_cache_clusters", "2"), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "node_type", "cache.m1.small"), + "aws_elasticache_replication_group.test", "node_type", "cache.m1.small"), ), }, - + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"apply_immediately"}, + }, { Config: testAccAWSElasticacheReplicationGroupConfigUpdatedNodeSize(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.bar", &rg), + testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.test", &rg), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "number_cache_clusters", "2"), + "aws_elasticache_replication_group.test", "number_cache_clusters", "2"), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "node_type", "cache.m1.medium"), + "aws_elasticache_replication_group.test", "node_type", "cache.m1.medium"), ), }, }, @@ -233,6 +262,10 @@ func TestAccAWSElasticacheReplicationGroup_updateNodeSize(t *testing.T) { //This is a test to prove that we panic we get in https://github.com/hashicorp/terraform/issues/9097 func TestAccAWSElasticacheReplicationGroup_updateParameterGroup(t *testing.T) { + oldvar := os.Getenv("AWS_DEFAULT_REGION") + os.Setenv("AWS_DEFAULT_REGION", "us-east-1") + defer os.Setenv("AWS_DEFAULT_REGION", oldvar) + var rg elasticache.ReplicationGroup parameterGroupResourceName1 := "aws_elasticache_parameter_group.test.0" parameterGroupResourceName2 := "aws_elasticache_parameter_group.test.1" @@ -251,7 +284,12 @@ func TestAccAWSElasticacheReplicationGroup_updateParameterGroup(t *testing.T) { resource.TestCheckResourceAttrPair(resourceName, "parameter_group_name", parameterGroupResourceName1, "name"), ), }, - + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"apply_immediately"}, + }, { Config: testAccAWSElasticacheReplicationGroupConfigParameterGroupName(rName, 1), Check: resource.ComposeTestCheckFunc( @@ -264,7 +302,13 @@ func TestAccAWSElasticacheReplicationGroup_updateParameterGroup(t *testing.T) { } func TestAccAWSElasticacheReplicationGroup_vpc(t *testing.T) { + oldvar := os.Getenv("AWS_DEFAULT_REGION") + os.Setenv("AWS_DEFAULT_REGION", "us-east-1") + defer os.Setenv("AWS_DEFAULT_REGION", oldvar) + var rg elasticache.ReplicationGroup + resourceName := "aws_elasticache_replication_group.test" + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -273,19 +317,31 @@ func TestAccAWSElasticacheReplicationGroup_vpc(t *testing.T) { { Config: testAccAWSElasticacheReplicationGroupInVPCConfig, Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.bar", &rg), + testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.test", &rg), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "number_cache_clusters", "1"), + "aws_elasticache_replication_group.test", "number_cache_clusters", "1"), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", 
"auto_minor_version_upgrade", "false"), + "aws_elasticache_replication_group.test", "auto_minor_version_upgrade", "false"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"apply_immediately", "availability_zones"}, + }, }, }) } func TestAccAWSElasticacheReplicationGroup_multiAzInVpc(t *testing.T) { + oldvar := os.Getenv("AWS_DEFAULT_REGION") + os.Setenv("AWS_DEFAULT_REGION", "us-east-1") + defer os.Setenv("AWS_DEFAULT_REGION", oldvar) + var rg elasticache.ReplicationGroup + resourceName := "aws_elasticache_replication_group.test" + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -294,25 +350,37 @@ func TestAccAWSElasticacheReplicationGroup_multiAzInVpc(t *testing.T) { { Config: testAccAWSElasticacheReplicationGroupMultiAZInVPCConfig, Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.bar", &rg), + testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.test", &rg), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "number_cache_clusters", "2"), + "aws_elasticache_replication_group.test", "number_cache_clusters", "2"), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "automatic_failover_enabled", "true"), + "aws_elasticache_replication_group.test", "automatic_failover_enabled", "true"), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "snapshot_window", "02:00-03:00"), + "aws_elasticache_replication_group.test", "snapshot_window", "02:00-03:00"), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "snapshot_retention_limit", "7"), + "aws_elasticache_replication_group.test", "snapshot_retention_limit", "7"), resource.TestCheckResourceAttrSet( - "aws_elasticache_replication_group.bar", "primary_endpoint_address"), + "aws_elasticache_replication_group.test", "primary_endpoint_address"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"apply_immediately", "availability_zones"}, + }, }, }) } func TestAccAWSElasticacheReplicationGroup_redisClusterInVpc2(t *testing.T) { + oldvar := os.Getenv("AWS_DEFAULT_REGION") + os.Setenv("AWS_DEFAULT_REGION", "us-east-1") + defer os.Setenv("AWS_DEFAULT_REGION", oldvar) + var rg elasticache.ReplicationGroup + resourceName := "aws_elasticache_replication_group.test" + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -321,27 +389,37 @@ func TestAccAWSElasticacheReplicationGroup_redisClusterInVpc2(t *testing.T) { { Config: testAccAWSElasticacheReplicationGroupRedisClusterInVPCConfig, Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.bar", &rg), + testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.test", &rg), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "number_cache_clusters", "2"), + "aws_elasticache_replication_group.test", "number_cache_clusters", "2"), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "automatic_failover_enabled", "false"), + "aws_elasticache_replication_group.test", "automatic_failover_enabled", "false"), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "snapshot_window", 
"02:00-03:00"), + "aws_elasticache_replication_group.test", "snapshot_window", "02:00-03:00"), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "snapshot_retention_limit", "7"), + "aws_elasticache_replication_group.test", "snapshot_retention_limit", "7"), resource.TestCheckResourceAttrSet( - "aws_elasticache_replication_group.bar", "primary_endpoint_address"), + "aws_elasticache_replication_group.test", "primary_endpoint_address"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"apply_immediately", "availability_zones"}, + }, }, }) } func TestAccAWSElasticacheReplicationGroup_ClusterMode_Basic(t *testing.T) { + oldvar := os.Getenv("AWS_DEFAULT_REGION") + os.Setenv("AWS_DEFAULT_REGION", "us-east-1") + defer os.Setenv("AWS_DEFAULT_REGION", oldvar) + var rg elasticache.ReplicationGroup rName := acctest.RandomWithPrefix("tf-acc-test") - resourceName := "aws_elasticache_replication_group.bar" + resourceName := "aws_elasticache_replication_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -360,14 +438,24 @@ func TestAccAWSElasticacheReplicationGroup_ClusterMode_Basic(t *testing.T) { resource.TestCheckResourceAttrSet(resourceName, "configuration_endpoint_address"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"apply_immediately"}, + }, }, }) } func TestAccAWSElasticacheReplicationGroup_ClusterMode_NumNodeGroups(t *testing.T) { + oldvar := os.Getenv("AWS_DEFAULT_REGION") + os.Setenv("AWS_DEFAULT_REGION", "us-east-1") + defer os.Setenv("AWS_DEFAULT_REGION", oldvar) + var rg elasticache.ReplicationGroup rName := acctest.RandomWithPrefix("tf-acc-test") - resourceName := "aws_elasticache_replication_group.bar" + resourceName := "aws_elasticache_replication_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -384,6 +472,12 @@ func TestAccAWSElasticacheReplicationGroup_ClusterMode_NumNodeGroups(t *testing. resource.TestCheckResourceAttr(resourceName, "cluster_mode.0.replicas_per_node_group", "1"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"apply_immediately"}, + }, { Config: testAccAWSElasticacheReplicationGroupNativeRedisClusterConfig(rName, 1, 1), Check: resource.ComposeTestCheckFunc( @@ -409,6 +503,10 @@ func TestAccAWSElasticacheReplicationGroup_ClusterMode_NumNodeGroups(t *testing. 
} func TestAccAWSElasticacheReplicationGroup_clusteringAndCacheNodesCausesError(t *testing.T) { + oldvar := os.Getenv("AWS_DEFAULT_REGION") + os.Setenv("AWS_DEFAULT_REGION", "us-east-1") + defer os.Setenv("AWS_DEFAULT_REGION", oldvar) + rInt := acctest.RandInt() rName := acctest.RandomWithPrefix("tf-acc-test") @@ -426,8 +524,13 @@ func TestAccAWSElasticacheReplicationGroup_clusteringAndCacheNodesCausesError(t } func TestAccAWSElasticacheReplicationGroup_enableSnapshotting(t *testing.T) { + oldvar := os.Getenv("AWS_DEFAULT_REGION") + os.Setenv("AWS_DEFAULT_REGION", "us-east-1") + defer os.Setenv("AWS_DEFAULT_REGION", oldvar) + var rg elasticache.ReplicationGroup rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_elasticache_replication_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -437,18 +540,23 @@ func TestAccAWSElasticacheReplicationGroup_enableSnapshotting(t *testing.T) { { Config: testAccAWSElasticacheReplicationGroupConfig(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.bar", &rg), + testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.test", &rg), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "snapshot_retention_limit", "0"), + "aws_elasticache_replication_group.test", "snapshot_retention_limit", "0"), ), }, - + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"apply_immediately"}, + }, { Config: testAccAWSElasticacheReplicationGroupConfigEnableSnapshotting(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.bar", &rg), + testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.test", &rg), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "snapshot_retention_limit", "2"), + "aws_elasticache_replication_group.test", "snapshot_retention_limit", "2"), ), }, }, @@ -456,7 +564,13 @@ func TestAccAWSElasticacheReplicationGroup_enableSnapshotting(t *testing.T) { } func TestAccAWSElasticacheReplicationGroup_enableAuthTokenTransitEncryption(t *testing.T) { + oldvar := os.Getenv("AWS_DEFAULT_REGION") + os.Setenv("AWS_DEFAULT_REGION", "us-east-1") + defer os.Setenv("AWS_DEFAULT_REGION", oldvar) + var rg elasticache.ReplicationGroup + resourceName := "aws_elasticache_replication_group.test" + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -465,17 +579,29 @@ func TestAccAWSElasticacheReplicationGroup_enableAuthTokenTransitEncryption(t *t { Config: testAccAWSElasticacheReplicationGroup_EnableAuthTokenTransitEncryptionConfig(acctest.RandInt(), acctest.RandString(10), acctest.RandString(16)), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.bar", &rg), + testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.test", &rg), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "transit_encryption_enabled", "true"), + "aws_elasticache_replication_group.test", "transit_encryption_enabled", "true"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"apply_immediately", "auth_token", "availability_zones"}, + }, }, }) } func 
TestAccAWSElasticacheReplicationGroup_enableAtRestEncryption(t *testing.T) { + oldvar := os.Getenv("AWS_DEFAULT_REGION") + os.Setenv("AWS_DEFAULT_REGION", "us-east-1") + defer os.Setenv("AWS_DEFAULT_REGION", oldvar) + var rg elasticache.ReplicationGroup + resourceName := "aws_elasticache_replication_group.test" + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -484,16 +610,26 @@ func TestAccAWSElasticacheReplicationGroup_enableAtRestEncryption(t *testing.T) { Config: testAccAWSElasticacheReplicationGroup_EnableAtRestEncryptionConfig(acctest.RandInt(), acctest.RandString(10)), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.bar", &rg), + testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.test", &rg), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.bar", "at_rest_encryption_enabled", "true"), + "aws_elasticache_replication_group.test", "at_rest_encryption_enabled", "true"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"apply_immediately", "availability_zones"}, + }, }, }) } func TestAccAWSElasticacheReplicationGroup_NumberCacheClusters(t *testing.T) { + oldvar := os.Getenv("AWS_DEFAULT_REGION") + os.Setenv("AWS_DEFAULT_REGION", "us-east-1") + defer os.Setenv("AWS_DEFAULT_REGION", oldvar) + var replicationGroup elasticache.ReplicationGroup rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_elasticache_replication_group.test" @@ -511,6 +647,12 @@ func TestAccAWSElasticacheReplicationGroup_NumberCacheClusters(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "2"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"apply_immediately"}, + }, { Config: testAccAWSElasticacheReplicationGroupConfig_NumberCacheClusters(rName, 4, false), Check: resource.ComposeTestCheckFunc( @@ -532,6 +674,10 @@ func TestAccAWSElasticacheReplicationGroup_NumberCacheClusters(t *testing.T) { } func TestAccAWSElasticacheReplicationGroup_NumberCacheClusters_Failover_AutoFailoverDisabled(t *testing.T) { + oldvar := os.Getenv("AWS_DEFAULT_REGION") + os.Setenv("AWS_DEFAULT_REGION", "us-east-1") + defer os.Setenv("AWS_DEFAULT_REGION", oldvar) + var replicationGroup elasticache.ReplicationGroup rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_elasticache_replication_group.test" @@ -549,6 +695,12 @@ func TestAccAWSElasticacheReplicationGroup_NumberCacheClusters_Failover_AutoFail resource.TestCheckResourceAttr(resourceName, "number_cache_clusters", "3"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"apply_immediately"}, + }, { PreConfig: func() { // Simulate failover so primary is on node we are trying to delete @@ -577,6 +729,10 @@ func TestAccAWSElasticacheReplicationGroup_NumberCacheClusters_Failover_AutoFail } func TestAccAWSElasticacheReplicationGroup_NumberCacheClusters_Failover_AutoFailoverEnabled(t *testing.T) { + oldvar := os.Getenv("AWS_DEFAULT_REGION") + os.Setenv("AWS_DEFAULT_REGION", "us-east-1") + defer os.Setenv("AWS_DEFAULT_REGION", oldvar) + var replicationGroup elasticache.ReplicationGroup rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_elasticache_replication_group.test" @@ -736,7 +892,7 @@ 
provider "aws" { region = "us-east-1" } -resource "aws_security_group" "bar" { +resource "aws_security_group" "test" { name = %[1]q description = "tf-test-security-group-descr" @@ -748,19 +904,19 @@ resource "aws_security_group" "bar" { } } -resource "aws_elasticache_security_group" "bar" { +resource "aws_elasticache_security_group" "test" { name = %[1]q description = "tf-test-security-group-descr" - security_group_names = ["${aws_security_group.bar.name}"] + security_group_names = ["${aws_security_group.test.name}"] } -resource "aws_elasticache_replication_group" "bar" { +resource "aws_elasticache_replication_group" "test" { replication_group_id = %[1]q replication_group_description = "test description" node_type = "cache.m1.small" number_cache_clusters = 2 port = 6379 - security_group_names = ["${aws_elasticache_security_group.bar.name}"] + security_group_names = ["${aws_elasticache_security_group.test.name}"] apply_immediately = true auto_minor_version_upgrade = false maintenance_window = "tue:06:30-tue:07:30" @@ -800,7 +956,7 @@ resource "aws_elasticache_subnet_group" "test" { subnet_ids = ["${aws_subnet.test.*.id[0]}", "${aws_subnet.test.*.id[1]}"] } -resource "aws_elasticache_replication_group" "bar" { +resource "aws_elasticache_replication_group" "test" { node_type = "cache.t2.micro" number_cache_clusters = 1 port = 6379 @@ -817,7 +973,7 @@ provider "aws" { region = "us-east-1" } -resource "aws_security_group" "bar" { +resource "aws_security_group" "test" { name = %[1]q description = "tf-test-security-group-descr" @@ -829,19 +985,19 @@ resource "aws_security_group" "bar" { } } -resource "aws_elasticache_security_group" "bar" { +resource "aws_elasticache_security_group" "test" { name = %[1]q description = "tf-test-security-group-descr" - security_group_names = ["${aws_security_group.bar.name}"] + security_group_names = ["${aws_security_group.test.name}"] } -resource "aws_elasticache_replication_group" "bar" { +resource "aws_elasticache_replication_group" "test" { replication_group_id = %[1]q replication_group_description = "test description" node_type = "cache.m1.small" number_cache_clusters = 2 port = 6379 - security_group_names = ["${aws_elasticache_security_group.bar.name}"] + security_group_names = ["${aws_elasticache_security_group.test.name}"] apply_immediately = true auto_minor_version_upgrade = false maintenance_window = "tue:06:30-tue:07:30" @@ -885,7 +1041,7 @@ provider "aws" { region = "us-east-1" } -resource "aws_security_group" "bar" { +resource "aws_security_group" "test" { name = %[1]q description = "tf-test-security-group-descr" @@ -897,19 +1053,19 @@ resource "aws_security_group" "bar" { } } -resource "aws_elasticache_security_group" "bar" { +resource "aws_elasticache_security_group" "test" { name = %[1]q description = "tf-test-security-group-descr" - security_group_names = ["${aws_security_group.bar.name}"] + security_group_names = ["${aws_security_group.test.name}"] } -resource "aws_elasticache_replication_group" "bar" { +resource "aws_elasticache_replication_group" "test" { replication_group_id = %[1]q replication_group_description = "updated description" node_type = "cache.m1.small" number_cache_clusters = 2 port = 6379 - security_group_names = ["${aws_elasticache_security_group.bar.name}"] + security_group_names = ["${aws_elasticache_security_group.test.name}"] apply_immediately = true auto_minor_version_upgrade = true } @@ -922,7 +1078,7 @@ provider "aws" { region = "us-east-1" } -resource "aws_security_group" "bar" { +resource "aws_security_group" "test" { 
name = %[1]q description = "tf-test-security-group-descr" @@ -934,19 +1090,19 @@ resource "aws_security_group" "bar" { } } -resource "aws_elasticache_security_group" "bar" { +resource "aws_elasticache_security_group" "test" { name = %[1]q description = "tf-test-security-group-descr" - security_group_names = ["${aws_security_group.bar.name}"] + security_group_names = ["${aws_security_group.test.name}"] } -resource "aws_elasticache_replication_group" "bar" { +resource "aws_elasticache_replication_group" "test" { replication_group_id = %[1]q replication_group_description = "updated description" node_type = "cache.m1.small" number_cache_clusters = 2 port = 6379 - security_group_names = ["${aws_elasticache_security_group.bar.name}"] + security_group_names = ["${aws_elasticache_security_group.test.name}"] apply_immediately = true auto_minor_version_upgrade = true maintenance_window = "wed:03:00-wed:06:00" @@ -961,7 +1117,7 @@ provider "aws" { region = "us-east-1" } -resource "aws_security_group" "bar" { +resource "aws_security_group" "test" { name = %[1]q description = "tf-test-security-group-descr" @@ -973,19 +1129,19 @@ resource "aws_security_group" "bar" { } } -resource "aws_elasticache_security_group" "bar" { +resource "aws_elasticache_security_group" "test" { name = %[1]q description = "tf-test-security-group-descr" - security_group_names = ["${aws_security_group.bar.name}"] + security_group_names = ["${aws_security_group.test.name}"] } -resource "aws_elasticache_replication_group" "bar" { +resource "aws_elasticache_replication_group" "test" { replication_group_id = %[1]q replication_group_description = "updated description" node_type = "cache.m1.medium" number_cache_clusters = 2 port = 6379 - security_group_names = ["${aws_elasticache_security_group.bar.name}"] + security_group_names = ["${aws_elasticache_security_group.test.name}"] apply_immediately = true } `, rName) @@ -998,15 +1154,15 @@ data "aws_availability_zones" "available" { state = "available" } -resource "aws_vpc" "foo" { +resource "aws_vpc" "test" { cidr_block = "192.168.0.0/16" tags = { Name = "terraform-testacc-elasticache-replication-group-in-vpc" } } -resource "aws_subnet" "foo" { - vpc_id = "${aws_vpc.foo.id}" +resource "aws_subnet" "test" { + vpc_id = "${aws_vpc.test.id}" cidr_block = "192.168.0.0/20" availability_zone = "${data.aws_availability_zones.available.names[0]}" tags = { @@ -1014,16 +1170,16 @@ resource "aws_subnet" "foo" { } } -resource "aws_elasticache_subnet_group" "bar" { +resource "aws_elasticache_subnet_group" "test" { name = "tf-test-cache-subnet-%03d" description = "tf-test-cache-subnet-group-descr" - subnet_ids = ["${aws_subnet.foo.id}"] + subnet_ids = ["${aws_subnet.test.id}"] } -resource "aws_security_group" "bar" { +resource "aws_security_group" "test" { name = "tf-test-security-group-%03d" description = "tf-test-security-group-descr" - vpc_id = "${aws_vpc.foo.id}" + vpc_id = "${aws_vpc.test.id}" ingress { from_port = -1 to_port = -1 @@ -1032,14 +1188,14 @@ resource "aws_security_group" "bar" { } } -resource "aws_elasticache_replication_group" "bar" { +resource "aws_elasticache_replication_group" "test" { replication_group_id = "tf-%s" replication_group_description = "test description" node_type = "cache.m1.small" number_cache_clusters = 1 port = 6379 - subnet_group_name = "${aws_elasticache_subnet_group.bar.name}" - security_group_ids = ["${aws_security_group.bar.id}"] + subnet_group_name = "${aws_elasticache_subnet_group.test.name}" + security_group_ids = ["${aws_security_group.test.id}"] 
availability_zones = ["${data.aws_availability_zones.available.names[0]}"] auto_minor_version_upgrade = false } @@ -1051,45 +1207,40 @@ data "aws_availability_zones" "available" { blacklisted_zone_ids = ["use1-az1"] state = "available" } - -resource "aws_vpc" "foo" { +resource "aws_vpc" "test" { cidr_block = "192.168.0.0/16" tags = { Name = "terraform-testacc-elasticache-replication-group-multi-az-in-vpc" } } - -resource "aws_subnet" "foo" { - vpc_id = "${aws_vpc.foo.id}" +resource "aws_subnet" "test" { + vpc_id = "${aws_vpc.test.id}" cidr_block = "192.168.0.0/20" availability_zone = "${data.aws_availability_zones.available.names[0]}" tags = { Name = "tf-acc-elasticache-replication-group-multi-az-in-vpc-foo" } } - -resource "aws_subnet" "bar" { - vpc_id = "${aws_vpc.foo.id}" +resource "aws_subnet" "test2" { + vpc_id = "${aws_vpc.test.id}" cidr_block = "192.168.16.0/20" availability_zone = "${data.aws_availability_zones.available.names[1]}" tags = { Name = "tf-acc-elasticache-replication-group-multi-az-in-vpc-bar" } } - -resource "aws_elasticache_subnet_group" "bar" { +resource "aws_elasticache_subnet_group" "test" { name = "tf-test-cache-subnet-%03d" description = "tf-test-cache-subnet-group-descr" subnet_ids = [ - "${aws_subnet.foo.id}", - "${aws_subnet.bar.id}" + "${aws_subnet.test.id}", + "${aws_subnet.test2.id}" ] } - -resource "aws_security_group" "bar" { +resource "aws_security_group" "test" { name = "tf-test-security-group-%03d" description = "tf-test-security-group-descr" - vpc_id = "${aws_vpc.foo.id}" + vpc_id = "${aws_vpc.test.id}" ingress { from_port = -1 to_port = -1 @@ -1097,15 +1248,14 @@ resource "aws_security_group" "bar" { cidr_blocks = ["0.0.0.0/0"] } } - -resource "aws_elasticache_replication_group" "bar" { +resource "aws_elasticache_replication_group" "test" { replication_group_id = "tf-%s" replication_group_description = "test description" node_type = "cache.m1.small" number_cache_clusters = 2 port = 6379 - subnet_group_name = "${aws_elasticache_subnet_group.bar.name}" - security_group_ids = ["${aws_security_group.bar.id}"] + subnet_group_name = "${aws_elasticache_subnet_group.test.name}" + security_group_ids = ["${aws_security_group.test.id}"] availability_zones = ["${data.aws_availability_zones.available.names[0]}","${data.aws_availability_zones.available.names[1]}"] automatic_failover_enabled = true snapshot_window = "02:00-03:00" @@ -1115,73 +1265,67 @@ resource "aws_elasticache_replication_group" "bar" { var testAccAWSElasticacheReplicationGroupRedisClusterInVPCConfig = fmt.Sprintf(` data "aws_availability_zones" "available" { - # InvalidParameterValue: Specified node type cache.m3.medium is not available in AZ us-east-1b. 
- blacklisted_zone_ids = ["use1-az1"] - state = "available" -} - -resource "aws_vpc" "foo" { - cidr_block = "192.168.0.0/16" - tags = { - Name = "terraform-testacc-elasticache-replication-group-redis-cluster-in-vpc" - } -} - -resource "aws_subnet" "foo" { - vpc_id = "${aws_vpc.foo.id}" - cidr_block = "192.168.0.0/20" - availability_zone = "${data.aws_availability_zones.available.names[0]}" - tags = { - Name = "tf-acc-elasticache-replication-group-redis-cluster-in-vpc-foo" - } -} - -resource "aws_subnet" "bar" { - vpc_id = "${aws_vpc.foo.id}" - cidr_block = "192.168.16.0/20" - availability_zone = "${data.aws_availability_zones.available.names[1]}" - tags = { - Name = "tf-acc-elasticache-replication-group-redis-cluster-in-vpc-bar" - } -} - -resource "aws_elasticache_subnet_group" "bar" { - name = "tf-test-cache-subnet-%03d" - description = "tf-test-cache-subnet-group-descr" - subnet_ids = [ - "${aws_subnet.foo.id}", - "${aws_subnet.bar.id}" - ] -} - -resource "aws_security_group" "bar" { - name = "tf-test-security-group-%03d" - description = "tf-test-security-group-descr" - vpc_id = "${aws_vpc.foo.id}" - ingress { - from_port = -1 - to_port = -1 - protocol = "icmp" - cidr_blocks = ["0.0.0.0/0"] - } -} - -resource "aws_elasticache_replication_group" "bar" { - replication_group_id = "tf-%s" - replication_group_description = "test description" - node_type = "cache.m3.medium" - number_cache_clusters = "2" - port = 6379 - subnet_group_name = "${aws_elasticache_subnet_group.bar.name}" - security_group_ids = ["${aws_security_group.bar.id}"] - availability_zones = ["${data.aws_availability_zones.available.names[0]}","${data.aws_availability_zones.available.names[1]}"] - automatic_failover_enabled = false - snapshot_window = "02:00-03:00" - snapshot_retention_limit = 7 - engine_version = "3.2.4" - maintenance_window = "thu:03:00-thu:04:00" -} -`, acctest.RandInt(), acctest.RandInt(), acctest.RandString(10)) + # InvalidParameterValue: Specified node type cache.m3.medium is not available in AZ us-east-1b. 
+ blacklisted_zone_ids = ["use1-az1"] + state = "available" + } + resource "aws_vpc" "test" { + cidr_block = "192.168.0.0/16" + tags = { + Name = "terraform-testacc-elasticache-replication-group-redis-cluster-in-vpc" + } + } + resource "aws_subnet" "test" { + vpc_id = "${aws_vpc.test.id}" + cidr_block = "192.168.0.0/20" + availability_zone = "${data.aws_availability_zones.available.names[0]}" + tags = { + Name = "tf-acc-elasticache-replication-group-redis-cluster-in-vpc-foo" + } + } + resource "aws_subnet" "test2" { + vpc_id = "${aws_vpc.test.id}" + cidr_block = "192.168.16.0/20" + availability_zone = "${data.aws_availability_zones.available.names[1]}" + tags = { + Name = "tf-acc-elasticache-replication-group-redis-cluster-in-vpc-bar" + } + } + resource "aws_elasticache_subnet_group" "test" { + name = "tf-test-cache-subnet-%03d" + description = "tf-test-cache-subnet-group-descr" + subnet_ids = [ + "${aws_subnet.test.id}", + "${aws_subnet.test2.id}" + ] + } + resource "aws_security_group" "test" { + name = "tf-test-security-group-%03d" + description = "tf-test-security-group-descr" + vpc_id = "${aws_vpc.test.id}" + ingress { + from_port = -1 + to_port = -1 + protocol = "icmp" + cidr_blocks = ["0.0.0.0/0"] + } + } + resource "aws_elasticache_replication_group" "test" { + replication_group_id = "tf-%s" + replication_group_description = "test description" + node_type = "cache.m3.medium" + number_cache_clusters = "2" + port = 6379 + subnet_group_name = "${aws_elasticache_subnet_group.test.name}" + security_group_ids = ["${aws_security_group.test.id}"] + availability_zones = ["${data.aws_availability_zones.available.names[0]}","${data.aws_availability_zones.available.names[1]}"] + automatic_failover_enabled = false + snapshot_window = "02:00-03:00" + snapshot_retention_limit = 7 + engine_version = "3.2.4" + maintenance_window = "thu:03:00-thu:04:00" + } + `, acctest.RandInt(), acctest.RandInt(), acctest.RandString(10)) func testAccAWSElasticacheReplicationGroupNativeRedisClusterErrorConfig(rInt int, rName string) string { return fmt.Sprintf(` @@ -1189,7 +1333,7 @@ data "aws_availability_zones" "available" { state = "available" } -resource "aws_vpc" "foo" { +resource "aws_vpc" "test" { cidr_block = "192.168.0.0/16" tags = { @@ -1197,40 +1341,40 @@ resource "aws_vpc" "foo" { } } -resource "aws_subnet" "foo" { - vpc_id = "${aws_vpc.foo.id}" +resource "aws_subnet" "test" { + vpc_id = "${aws_vpc.test.id}" cidr_block = "192.168.0.0/20" availability_zone = "${data.aws_availability_zones.available.names[0]}" tags = { - Name = "tf-acc-elasticache-replication-group-native-redis-cluster-err-foo" + Name = "tf-acc-elasticache-replication-group-native-redis-cluster-err-test" } } -resource "aws_subnet" "bar" { - vpc_id = "${aws_vpc.foo.id}" +resource "aws_subnet" "test2" { + vpc_id = "${aws_vpc.test.id}" cidr_block = "192.168.16.0/20" availability_zone = "${data.aws_availability_zones.available.names[1]}" tags = { - Name = "tf-acc-elasticache-replication-group-native-redis-cluster-err-bar" + Name = "tf-acc-elasticache-replication-group-native-redis-cluster-err-test" } } -resource "aws_elasticache_subnet_group" "bar" { +resource "aws_elasticache_subnet_group" "test" { name = "tf-test-cache-subnet-%03d" description = "tf-test-cache-subnet-group-descr" subnet_ids = [ - "${aws_subnet.foo.id}", - "${aws_subnet.bar.id}", + "${aws_subnet.test.id}", + "${aws_subnet.test.id}", ] } -resource "aws_security_group" "bar" { +resource "aws_security_group" "test" { name = "tf-test-security-group-%03d" description = 
"tf-test-security-group-descr" - vpc_id = "${aws_vpc.foo.id}" + vpc_id = "${aws_vpc.test.id}" ingress { from_port = -1 @@ -1240,13 +1384,13 @@ resource "aws_security_group" "bar" { } } -resource "aws_elasticache_replication_group" "bar" { +resource "aws_elasticache_replication_group" "test" { replication_group_id = "tf-%s" replication_group_description = "test description" node_type = "cache.t2.micro" port = 6379 - subnet_group_name = "${aws_elasticache_subnet_group.bar.name}" - security_group_ids = ["${aws_security_group.bar.id}"] + subnet_group_name = "${aws_elasticache_subnet_group.test.name}" + security_group_ids = ["${aws_security_group.test.id}"] automatic_failover_enabled = true cluster_mode { @@ -1265,7 +1409,7 @@ data "aws_availability_zones" "available" { state = "available" } -resource "aws_vpc" "foo" { +resource "aws_vpc" "test" { cidr_block = "192.168.0.0/16" tags = { @@ -1273,40 +1417,40 @@ resource "aws_vpc" "foo" { } } -resource "aws_subnet" "foo" { - vpc_id = "${aws_vpc.foo.id}" +resource "aws_subnet" "test" { + vpc_id = "${aws_vpc.test.id}" cidr_block = "192.168.0.0/20" availability_zone = "${data.aws_availability_zones.available.names[0]}" tags = { - Name = "tf-acc-elasticache-replication-group-native-redis-cluster-foo" + Name = "tf-acc-elasticache-replication-group-native-redis-cluster-test" } } -resource "aws_subnet" "bar" { - vpc_id = "${aws_vpc.foo.id}" +resource "aws_subnet" "test2" { + vpc_id = "${aws_vpc.test.id}" cidr_block = "192.168.16.0/20" availability_zone = "${data.aws_availability_zones.available.names[1]}" tags = { - Name = "tf-acc-elasticache-replication-group-native-redis-cluster-bar" + Name = "tf-acc-elasticache-replication-group-native-redis-cluster-test" } } -resource "aws_elasticache_subnet_group" "bar" { +resource "aws_elasticache_subnet_group" "test" { name = "tf-test-%[1]s" description = "tf-test-cache-subnet-group-descr" subnet_ids = [ - "${aws_subnet.foo.id}", - "${aws_subnet.bar.id}", + "${aws_subnet.test.id}", + "${aws_subnet.test.id}", ] } -resource "aws_security_group" "bar" { +resource "aws_security_group" "test" { name = "tf-test-%[1]s" description = "tf-test-security-group-descr" - vpc_id = "${aws_vpc.foo.id}" + vpc_id = "${aws_vpc.test.id}" ingress { from_port = -1 @@ -1316,13 +1460,13 @@ resource "aws_security_group" "bar" { } } -resource "aws_elasticache_replication_group" "bar" { +resource "aws_elasticache_replication_group" "test" { replication_group_id = "tf-%[1]s" replication_group_description = "test description" node_type = "cache.t2.micro" port = 6379 - subnet_group_name = "${aws_elasticache_subnet_group.bar.name}" - security_group_ids = ["${aws_security_group.bar.id}"] + subnet_group_name = "${aws_elasticache_subnet_group.test.name}" + security_group_ids = ["${aws_security_group.test.id}"] automatic_failover_enabled = true cluster_mode { @@ -1339,7 +1483,7 @@ data "aws_availability_zones" "available" { state = "available" } -resource "aws_vpc" "foo" { +resource "aws_vpc" "test" { cidr_block = "192.168.0.0/16" tags = { @@ -1347,8 +1491,8 @@ resource "aws_vpc" "foo" { } } -resource "aws_subnet" "foo" { - vpc_id = "${aws_vpc.foo.id}" +resource "aws_subnet" "test" { + vpc_id = "${aws_vpc.test.id}" cidr_block = "192.168.0.0/20" availability_zone = "${data.aws_availability_zones.available.names[0]}" @@ -1357,19 +1501,19 @@ resource "aws_subnet" "foo" { } } -resource "aws_elasticache_subnet_group" "bar" { +resource "aws_elasticache_subnet_group" "test" { name = "tf-test-cache-subnet-%03d" description = 
"tf-test-cache-subnet-group-descr" subnet_ids = [ - "${aws_subnet.foo.id}", + "${aws_subnet.test.id}", ] } -resource "aws_security_group" "bar" { +resource "aws_security_group" "test" { name = "tf-test-security-group-%03d" description = "tf-test-security-group-descr" - vpc_id = "${aws_vpc.foo.id}" + vpc_id = "${aws_vpc.test.id}" ingress { from_port = -1 @@ -1379,14 +1523,14 @@ resource "aws_security_group" "bar" { } } -resource "aws_elasticache_replication_group" "bar" { +resource "aws_elasticache_replication_group" "test" { replication_group_id = "tf-%s" replication_group_description = "test description" node_type = "cache.t2.micro" number_cache_clusters = "1" port = 6379 - subnet_group_name = "${aws_elasticache_subnet_group.bar.name}" - security_group_ids = ["${aws_security_group.bar.id}"] + subnet_group_name = "${aws_elasticache_subnet_group.test.name}" + security_group_ids = ["${aws_security_group.test.id}"] parameter_group_name = "default.redis3.2" availability_zones = ["${data.aws_availability_zones.available.names[0]}"] engine_version = "3.2.6" @@ -1401,7 +1545,7 @@ data "aws_availability_zones" "available" { state = "available" } -resource "aws_vpc" "foo" { +resource "aws_vpc" "test" { cidr_block = "192.168.0.0/16" tags = { @@ -1409,8 +1553,8 @@ resource "aws_vpc" "foo" { } } -resource "aws_subnet" "foo" { - vpc_id = "${aws_vpc.foo.id}" +resource "aws_subnet" "test" { + vpc_id = "${aws_vpc.test.id}" cidr_block = "192.168.0.0/20" availability_zone = "${data.aws_availability_zones.available.names[0]}" @@ -1419,19 +1563,19 @@ resource "aws_subnet" "foo" { } } -resource "aws_elasticache_subnet_group" "bar" { +resource "aws_elasticache_subnet_group" "test" { name = "tf-test-cache-subnet-%03d" description = "tf-test-cache-subnet-group-descr" subnet_ids = [ - "${aws_subnet.foo.id}", + "${aws_subnet.test.id}", ] } -resource "aws_security_group" "bar" { +resource "aws_security_group" "test" { name = "tf-test-security-group-%03d" description = "tf-test-security-group-descr" - vpc_id = "${aws_vpc.foo.id}" + vpc_id = "${aws_vpc.test.id}" ingress { from_port = -1 @@ -1441,14 +1585,14 @@ resource "aws_security_group" "bar" { } } -resource "aws_elasticache_replication_group" "bar" { +resource "aws_elasticache_replication_group" "test" { replication_group_id = "tf-%s" replication_group_description = "test description" node_type = "cache.t2.micro" number_cache_clusters = "1" port = 6379 - subnet_group_name = "${aws_elasticache_subnet_group.bar.name}" - security_group_ids = ["${aws_security_group.bar.id}"] + subnet_group_name = "${aws_elasticache_subnet_group.test.name}" + security_group_ids = ["${aws_security_group.test.id}"] parameter_group_name = "default.redis3.2" availability_zones = ["${data.aws_availability_zones.available.names[0]}"] engine_version = "3.2.6" diff --git a/aws/resource_aws_elasticache_security_group_test.go b/aws/resource_aws_elasticache_security_group_test.go index 6e9b3d35f61..35276d4b4fd 100644 --- a/aws/resource_aws_elasticache_security_group_test.go +++ b/aws/resource_aws_elasticache_security_group_test.go @@ -67,29 +67,13 @@ func testSweepElasticacheCacheSecurityGroups(region string) error { } func TestAccAWSElasticacheSecurityGroup_basic(t *testing.T) { - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSElasticacheSecurityGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSElasticacheSecurityGroupConfig, - Check: resource.ComposeTestCheckFunc( - 
testAccCheckAWSElasticacheSecurityGroupExists("aws_elasticache_security_group.bar"), - resource.TestCheckResourceAttr( - "aws_elasticache_security_group.bar", "description", "Managed by Terraform"), - ), - }, - }, - }) -} - -func TestAccAWSElasticacheSecurityGroup_Import(t *testing.T) { // Use EC2-Classic enabled us-east-1 for testing oldRegion := os.Getenv("AWS_DEFAULT_REGION") os.Setenv("AWS_DEFAULT_REGION", "us-east-1") defer os.Setenv("AWS_DEFAULT_REGION", oldRegion) + resourceName := "aws_elasticache_security_group.test" + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -97,10 +81,14 @@ func TestAccAWSElasticacheSecurityGroup_Import(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccAWSElasticacheSecurityGroupConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSElasticacheSecurityGroupExists(resourceName), + resource.TestCheckResourceAttr( + resourceName, "description", "Managed by Terraform"), + ), }, - { - ResourceName: "aws_elasticache_security_group.bar", + ResourceName: resourceName, ImportState: true, ImportStateVerify: true, }, @@ -157,7 +145,7 @@ provider "aws" { region = "us-east-1" } -resource "aws_security_group" "bar" { +resource "aws_security_group" "test" { name = "tf-test-security-group-%03d" ingress { @@ -168,8 +156,8 @@ resource "aws_security_group" "bar" { } } -resource "aws_elasticache_security_group" "bar" { +resource "aws_elasticache_security_group" "test" { name = "tf-test-security-group-%03d" - security_group_names = ["${aws_security_group.bar.name}"] + security_group_names = ["${aws_security_group.test.name}"] } `, acctest.RandInt(), acctest.RandInt()) diff --git a/aws/resource_aws_elasticache_subnet_group_test.go b/aws/resource_aws_elasticache_subnet_group_test.go index ece6014e9bf..0b60ddf3e84 100644 --- a/aws/resource_aws_elasticache_subnet_group_test.go +++ b/aws/resource_aws_elasticache_subnet_group_test.go @@ -12,9 +12,10 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/terraform" ) -func TestAccAWSElasticacheSubnetGroup_importBasic(t *testing.T) { - resourceName := "aws_elasticache_subnet_group.bar" +func TestAccAWSElasticacheSubnetGroup_basic(t *testing.T) { + var csg elasticache.CacheSubnetGroup config := fmt.Sprintf(testAccAWSElasticacheSubnetGroupConfig, acctest.RandInt()) + resourceName := "aws_elasticache_subnet_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -23,8 +24,12 @@ func TestAccAWSElasticacheSubnetGroup_importBasic(t *testing.T) { Steps: []resource.TestStep{ { Config: config, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSElasticacheSubnetGroupExists(resourceName, &csg), + resource.TestCheckResourceAttr( + resourceName, "description", "Managed by Terraform"), + ), }, - { ResourceName: resourceName, ImportState: true, @@ -36,33 +41,12 @@ func TestAccAWSElasticacheSubnetGroup_importBasic(t *testing.T) { }) } -func TestAccAWSElasticacheSubnetGroup_basic(t *testing.T) { - var csg elasticache.CacheSubnetGroup - config := fmt.Sprintf(testAccAWSElasticacheSubnetGroupConfig, acctest.RandInt()) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSElasticacheSubnetGroupDestroy, - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheSubnetGroupExists("aws_elasticache_subnet_group.bar", &csg), - resource.TestCheckResourceAttr( - 
"aws_elasticache_subnet_group.bar", "description", "Managed by Terraform"), - ), - }, - }, - }) -} - func TestAccAWSElasticacheSubnetGroup_update(t *testing.T) { var csg elasticache.CacheSubnetGroup - rn := "aws_elasticache_subnet_group.bar" - ri := acctest.RandInt() - preConfig := fmt.Sprintf(testAccAWSElasticacheSubnetGroupUpdateConfigPre, ri) - postConfig := fmt.Sprintf(testAccAWSElasticacheSubnetGroupUpdateConfigPost, ri) + resourceName := "aws_elasticache_subnet_group.test" + rInt := acctest.RandInt() + preConfig := fmt.Sprintf(testAccAWSElasticacheSubnetGroupUpdateConfigPre, rInt) + postConfig := fmt.Sprintf(testAccAWSElasticacheSubnetGroupUpdateConfigPost, rInt) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -72,16 +56,22 @@ func TestAccAWSElasticacheSubnetGroup_update(t *testing.T) { { Config: preConfig, Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheSubnetGroupExists(rn, &csg), - testAccCheckAWSElastiCacheSubnetGroupAttrs(&csg, rn, 1), + testAccCheckAWSElasticacheSubnetGroupExists(resourceName, &csg), + testAccCheckAWSElastiCacheSubnetGroupAttrs(&csg, resourceName, 1), ), }, - + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "description"}, + }, { Config: postConfig, Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheSubnetGroupExists(rn, &csg), - testAccCheckAWSElastiCacheSubnetGroupAttrs(&csg, rn, 2), + testAccCheckAWSElasticacheSubnetGroupExists(resourceName, &csg), + testAccCheckAWSElastiCacheSubnetGroupAttrs(&csg, resourceName, 2), ), }, }, @@ -181,7 +171,7 @@ resource "aws_subnet" "foo" { } } -resource "aws_elasticache_subnet_group" "bar" { +resource "aws_elasticache_subnet_group" "test" { // Including uppercase letters in this name to ensure // that we correctly handle the fact that the API // normalizes names to lowercase. 
@@ -206,7 +196,7 @@ resource "aws_subnet" "foo" { } } -resource "aws_elasticache_subnet_group" "bar" { +resource "aws_elasticache_subnet_group" "test" { name = "tf-test-cache-subnet-%03d" description = "tf-test-cache-subnet-group-descr" subnet_ids = ["${aws_subnet.foo.id}"] @@ -230,21 +220,21 @@ resource "aws_subnet" "foo" { } } -resource "aws_subnet" "bar" { +resource "aws_subnet" "test" { vpc_id = "${aws_vpc.foo.id}" cidr_block = "10.0.2.0/24" availability_zone = "us-west-2a" tags = { - Name = "tf-acc-elasticache-subnet-group-update-bar" + Name = "tf-acc-elasticache-subnet-group-update-test" } } -resource "aws_elasticache_subnet_group" "bar" { +resource "aws_elasticache_subnet_group" "test" { name = "tf-test-cache-subnet-%03d" description = "tf-test-cache-subnet-group-descr-edited" subnet_ids = [ "${aws_subnet.foo.id}", - "${aws_subnet.bar.id}", + "${aws_subnet.test.id}", ] } ` From 69ccb6608846ffb24ee44fe19ceef5906d56a487 Mon Sep 17 00:00:00 2001 From: Ryn Daniels Date: Wed, 16 Oct 2019 15:18:32 +0200 Subject: [PATCH 08/55] remove aws default region handling from elasticache tests --- ..._aws_elasticache_replication_group_test.go | 73 ------------------- 1 file changed, 73 deletions(-) diff --git a/aws/resource_aws_elasticache_replication_group_test.go b/aws/resource_aws_elasticache_replication_group_test.go index 2e2984958fa..6d9af3b4e01 100644 --- a/aws/resource_aws_elasticache_replication_group_test.go +++ b/aws/resource_aws_elasticache_replication_group_test.go @@ -3,7 +3,6 @@ package aws import ( "fmt" "log" - "os" "regexp" "strings" "testing" @@ -59,10 +58,6 @@ func testSweepElasticacheReplicationGroups(region string) error { } func TestAccAWSElasticacheReplicationGroup_basic(t *testing.T) { - oldvar := os.Getenv("AWS_DEFAULT_REGION") - os.Setenv("AWS_DEFAULT_REGION", "us-east-1") - defer os.Setenv("AWS_DEFAULT_REGION", oldvar) - var rg elasticache.ReplicationGroup rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_elasticache_replication_group.test" @@ -97,10 +92,6 @@ func TestAccAWSElasticacheReplicationGroup_basic(t *testing.T) { } func TestAccAWSElasticacheReplicationGroup_Uppercase(t *testing.T) { - oldvar := os.Getenv("AWS_DEFAULT_REGION") - os.Setenv("AWS_DEFAULT_REGION", "us-east-1") - defer os.Setenv("AWS_DEFAULT_REGION", oldvar) - var rg elasticache.ReplicationGroup rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_elasticache_replication_group.test" @@ -129,10 +120,6 @@ func TestAccAWSElasticacheReplicationGroup_Uppercase(t *testing.T) { } func TestAccAWSElasticacheReplicationGroup_updateDescription(t *testing.T) { - oldvar := os.Getenv("AWS_DEFAULT_REGION") - os.Setenv("AWS_DEFAULT_REGION", "us-east-1") - defer os.Setenv("AWS_DEFAULT_REGION", oldvar) - var rg elasticache.ReplicationGroup rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_elasticache_replication_group.test" @@ -177,10 +164,6 @@ func TestAccAWSElasticacheReplicationGroup_updateDescription(t *testing.T) { } func TestAccAWSElasticacheReplicationGroup_updateMaintenanceWindow(t *testing.T) { - oldvar := os.Getenv("AWS_DEFAULT_REGION") - os.Setenv("AWS_DEFAULT_REGION", "us-east-1") - defer os.Setenv("AWS_DEFAULT_REGION", oldvar) - var rg elasticache.ReplicationGroup rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_elasticache_replication_group.test" @@ -217,10 +200,6 @@ func TestAccAWSElasticacheReplicationGroup_updateMaintenanceWindow(t *testing.T) } func TestAccAWSElasticacheReplicationGroup_updateNodeSize(t *testing.T) { - 
oldvar := os.Getenv("AWS_DEFAULT_REGION") - os.Setenv("AWS_DEFAULT_REGION", "us-east-1") - defer os.Setenv("AWS_DEFAULT_REGION", oldvar) - var rg elasticache.ReplicationGroup rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_elasticache_replication_group.test" @@ -262,10 +241,6 @@ func TestAccAWSElasticacheReplicationGroup_updateNodeSize(t *testing.T) { //This is a test to prove that we panic we get in https://github.com/hashicorp/terraform/issues/9097 func TestAccAWSElasticacheReplicationGroup_updateParameterGroup(t *testing.T) { - oldvar := os.Getenv("AWS_DEFAULT_REGION") - os.Setenv("AWS_DEFAULT_REGION", "us-east-1") - defer os.Setenv("AWS_DEFAULT_REGION", oldvar) - var rg elasticache.ReplicationGroup parameterGroupResourceName1 := "aws_elasticache_parameter_group.test.0" parameterGroupResourceName2 := "aws_elasticache_parameter_group.test.1" @@ -302,10 +277,6 @@ func TestAccAWSElasticacheReplicationGroup_updateParameterGroup(t *testing.T) { } func TestAccAWSElasticacheReplicationGroup_vpc(t *testing.T) { - oldvar := os.Getenv("AWS_DEFAULT_REGION") - os.Setenv("AWS_DEFAULT_REGION", "us-east-1") - defer os.Setenv("AWS_DEFAULT_REGION", oldvar) - var rg elasticache.ReplicationGroup resourceName := "aws_elasticache_replication_group.test" @@ -335,10 +306,6 @@ func TestAccAWSElasticacheReplicationGroup_vpc(t *testing.T) { } func TestAccAWSElasticacheReplicationGroup_multiAzInVpc(t *testing.T) { - oldvar := os.Getenv("AWS_DEFAULT_REGION") - os.Setenv("AWS_DEFAULT_REGION", "us-east-1") - defer os.Setenv("AWS_DEFAULT_REGION", oldvar) - var rg elasticache.ReplicationGroup resourceName := "aws_elasticache_replication_group.test" @@ -374,10 +341,6 @@ func TestAccAWSElasticacheReplicationGroup_multiAzInVpc(t *testing.T) { } func TestAccAWSElasticacheReplicationGroup_redisClusterInVpc2(t *testing.T) { - oldvar := os.Getenv("AWS_DEFAULT_REGION") - os.Setenv("AWS_DEFAULT_REGION", "us-east-1") - defer os.Setenv("AWS_DEFAULT_REGION", oldvar) - var rg elasticache.ReplicationGroup resourceName := "aws_elasticache_replication_group.test" @@ -413,10 +376,6 @@ func TestAccAWSElasticacheReplicationGroup_redisClusterInVpc2(t *testing.T) { } func TestAccAWSElasticacheReplicationGroup_ClusterMode_Basic(t *testing.T) { - oldvar := os.Getenv("AWS_DEFAULT_REGION") - os.Setenv("AWS_DEFAULT_REGION", "us-east-1") - defer os.Setenv("AWS_DEFAULT_REGION", oldvar) - var rg elasticache.ReplicationGroup rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_elasticache_replication_group.test" @@ -449,10 +408,6 @@ func TestAccAWSElasticacheReplicationGroup_ClusterMode_Basic(t *testing.T) { } func TestAccAWSElasticacheReplicationGroup_ClusterMode_NumNodeGroups(t *testing.T) { - oldvar := os.Getenv("AWS_DEFAULT_REGION") - os.Setenv("AWS_DEFAULT_REGION", "us-east-1") - defer os.Setenv("AWS_DEFAULT_REGION", oldvar) - var rg elasticache.ReplicationGroup rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_elasticache_replication_group.test" @@ -503,10 +458,6 @@ func TestAccAWSElasticacheReplicationGroup_ClusterMode_NumNodeGroups(t *testing. 
} func TestAccAWSElasticacheReplicationGroup_clusteringAndCacheNodesCausesError(t *testing.T) { - oldvar := os.Getenv("AWS_DEFAULT_REGION") - os.Setenv("AWS_DEFAULT_REGION", "us-east-1") - defer os.Setenv("AWS_DEFAULT_REGION", oldvar) - rInt := acctest.RandInt() rName := acctest.RandomWithPrefix("tf-acc-test") @@ -524,10 +475,6 @@ func TestAccAWSElasticacheReplicationGroup_clusteringAndCacheNodesCausesError(t } func TestAccAWSElasticacheReplicationGroup_enableSnapshotting(t *testing.T) { - oldvar := os.Getenv("AWS_DEFAULT_REGION") - os.Setenv("AWS_DEFAULT_REGION", "us-east-1") - defer os.Setenv("AWS_DEFAULT_REGION", oldvar) - var rg elasticache.ReplicationGroup rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_elasticache_replication_group.test" @@ -564,10 +511,6 @@ func TestAccAWSElasticacheReplicationGroup_enableSnapshotting(t *testing.T) { } func TestAccAWSElasticacheReplicationGroup_enableAuthTokenTransitEncryption(t *testing.T) { - oldvar := os.Getenv("AWS_DEFAULT_REGION") - os.Setenv("AWS_DEFAULT_REGION", "us-east-1") - defer os.Setenv("AWS_DEFAULT_REGION", oldvar) - var rg elasticache.ReplicationGroup resourceName := "aws_elasticache_replication_group.test" @@ -595,10 +538,6 @@ func TestAccAWSElasticacheReplicationGroup_enableAuthTokenTransitEncryption(t *t } func TestAccAWSElasticacheReplicationGroup_enableAtRestEncryption(t *testing.T) { - oldvar := os.Getenv("AWS_DEFAULT_REGION") - os.Setenv("AWS_DEFAULT_REGION", "us-east-1") - defer os.Setenv("AWS_DEFAULT_REGION", oldvar) - var rg elasticache.ReplicationGroup resourceName := "aws_elasticache_replication_group.test" @@ -626,10 +565,6 @@ func TestAccAWSElasticacheReplicationGroup_enableAtRestEncryption(t *testing.T) } func TestAccAWSElasticacheReplicationGroup_NumberCacheClusters(t *testing.T) { - oldvar := os.Getenv("AWS_DEFAULT_REGION") - os.Setenv("AWS_DEFAULT_REGION", "us-east-1") - defer os.Setenv("AWS_DEFAULT_REGION", oldvar) - var replicationGroup elasticache.ReplicationGroup rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_elasticache_replication_group.test" @@ -674,10 +609,6 @@ func TestAccAWSElasticacheReplicationGroup_NumberCacheClusters(t *testing.T) { } func TestAccAWSElasticacheReplicationGroup_NumberCacheClusters_Failover_AutoFailoverDisabled(t *testing.T) { - oldvar := os.Getenv("AWS_DEFAULT_REGION") - os.Setenv("AWS_DEFAULT_REGION", "us-east-1") - defer os.Setenv("AWS_DEFAULT_REGION", oldvar) - var replicationGroup elasticache.ReplicationGroup rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_elasticache_replication_group.test" @@ -729,10 +660,6 @@ func TestAccAWSElasticacheReplicationGroup_NumberCacheClusters_Failover_AutoFail } func TestAccAWSElasticacheReplicationGroup_NumberCacheClusters_Failover_AutoFailoverEnabled(t *testing.T) { - oldvar := os.Getenv("AWS_DEFAULT_REGION") - os.Setenv("AWS_DEFAULT_REGION", "us-east-1") - defer os.Setenv("AWS_DEFAULT_REGION", oldvar) - var replicationGroup elasticache.ReplicationGroup rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_elasticache_replication_group.test" From 3773ed55bf6b777b074ffbc9f8f0887519eb6417 Mon Sep 17 00:00:00 2001 From: Ryn Daniels Date: Fri, 18 Oct 2019 10:42:46 +0200 Subject: [PATCH 09/55] Put the region back for tests that need it --- ..._aws_elasticache_replication_group_test.go | 21 +++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/aws/resource_aws_elasticache_replication_group_test.go b/aws/resource_aws_elasticache_replication_group_test.go index 
6d9af3b4e01..9d22d991ef8 100644 --- a/aws/resource_aws_elasticache_replication_group_test.go +++ b/aws/resource_aws_elasticache_replication_group_test.go @@ -3,6 +3,7 @@ package aws import ( "fmt" "log" + "os" "regexp" "strings" "testing" @@ -58,6 +59,10 @@ func testSweepElasticacheReplicationGroups(region string) error { } func TestAccAWSElasticacheReplicationGroup_basic(t *testing.T) { + oldvar := os.Getenv("AWS_DEFAULT_REGION") + os.Setenv("AWS_DEFAULT_REGION", "us-east-1") + defer os.Setenv("AWS_DEFAULT_REGION", oldvar) + var rg elasticache.ReplicationGroup rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_elasticache_replication_group.test" @@ -120,6 +125,10 @@ func TestAccAWSElasticacheReplicationGroup_Uppercase(t *testing.T) { } func TestAccAWSElasticacheReplicationGroup_updateDescription(t *testing.T) { + oldvar := os.Getenv("AWS_DEFAULT_REGION") + os.Setenv("AWS_DEFAULT_REGION", "us-east-1") + defer os.Setenv("AWS_DEFAULT_REGION", oldvar) + var rg elasticache.ReplicationGroup rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_elasticache_replication_group.test" @@ -164,6 +173,10 @@ func TestAccAWSElasticacheReplicationGroup_updateDescription(t *testing.T) { } func TestAccAWSElasticacheReplicationGroup_updateMaintenanceWindow(t *testing.T) { + oldvar := os.Getenv("AWS_DEFAULT_REGION") + os.Setenv("AWS_DEFAULT_REGION", "us-east-1") + defer os.Setenv("AWS_DEFAULT_REGION", oldvar) + var rg elasticache.ReplicationGroup rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_elasticache_replication_group.test" @@ -200,6 +213,10 @@ func TestAccAWSElasticacheReplicationGroup_updateMaintenanceWindow(t *testing.T) } func TestAccAWSElasticacheReplicationGroup_updateNodeSize(t *testing.T) { + oldvar := os.Getenv("AWS_DEFAULT_REGION") + os.Setenv("AWS_DEFAULT_REGION", "us-east-1") + defer os.Setenv("AWS_DEFAULT_REGION", oldvar) + var rg elasticache.ReplicationGroup rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_elasticache_replication_group.test" @@ -475,6 +492,10 @@ func TestAccAWSElasticacheReplicationGroup_clusteringAndCacheNodesCausesError(t } func TestAccAWSElasticacheReplicationGroup_enableSnapshotting(t *testing.T) { + oldvar := os.Getenv("AWS_DEFAULT_REGION") + os.Setenv("AWS_DEFAULT_REGION", "us-east-1") + defer os.Setenv("AWS_DEFAULT_REGION", oldvar) + var rg elasticache.ReplicationGroup rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_elasticache_replication_group.test" From f34fbf5b521377f273c951a85bfeda659fe8981d Mon Sep 17 00:00:00 2001 From: Ryn Daniels Date: Mon, 21 Oct 2019 12:17:08 +0200 Subject: [PATCH 10/55] Import test refactor for DX resources --- aws/resource_aws_dx_connection_test.go | 65 +++++++++----------- aws/resource_aws_dx_gateway_test.go | 34 ++++------- aws/resource_aws_dx_lag_test.go | 84 ++++++++++++-------------- 3 files changed, 78 insertions(+), 105 deletions(-) diff --git a/aws/resource_aws_dx_connection_test.go b/aws/resource_aws_dx_connection_test.go index f5613c16dd7..eba52fe2590 100644 --- a/aws/resource_aws_dx_connection_test.go +++ b/aws/resource_aws_dx_connection_test.go @@ -11,29 +11,9 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/terraform" ) -func TestAccAWSDxConnection_importBasic(t *testing.T) { - resourceName := "aws_dx_connection.hoge" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAwsDxConnectionDestroy, - Steps: []resource.TestStep{ 
- { - Config: testAccDxConnectionConfig(acctest.RandString(5)), - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - func TestAccAWSDxConnection_basic(t *testing.T) { connectionName := fmt.Sprintf("tf-dx-%s", acctest.RandString(5)) + resourceName := "aws_dx_connection.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -43,19 +23,25 @@ func TestAccAWSDxConnection_basic(t *testing.T) { { Config: testAccDxConnectionConfig(connectionName), Check: resource.ComposeTestCheckFunc( - testAccCheckAwsDxConnectionExists("aws_dx_connection.hoge"), - resource.TestCheckResourceAttr("aws_dx_connection.hoge", "name", connectionName), - resource.TestCheckResourceAttr("aws_dx_connection.hoge", "bandwidth", "1Gbps"), - resource.TestCheckResourceAttr("aws_dx_connection.hoge", "location", "EqSe2"), - resource.TestCheckResourceAttr("aws_dx_connection.hoge", "tags.%", "0"), + testAccCheckAwsDxConnectionExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "name", connectionName), + resource.TestCheckResourceAttr(resourceName, "bandwidth", "1Gbps"), + resource.TestCheckResourceAttr(resourceName, "location", "EqSe2"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } func TestAccAWSDxConnection_tags(t *testing.T) { connectionName := fmt.Sprintf("tf-dx-%s", acctest.RandString(5)) + resourceName := "aws_dx_connection.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -65,19 +51,24 @@ func TestAccAWSDxConnection_tags(t *testing.T) { { Config: testAccDxConnectionConfig_tags(connectionName), Check: resource.ComposeTestCheckFunc( - testAccCheckAwsDxConnectionExists("aws_dx_connection.hoge"), - resource.TestCheckResourceAttr("aws_dx_connection.hoge", "name", connectionName), - resource.TestCheckResourceAttr("aws_dx_connection.hoge", "tags.%", "2"), - resource.TestCheckResourceAttr("aws_dx_connection.hoge", "tags.Usage", "original"), + testAccCheckAwsDxConnectionExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "name", connectionName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.Usage", "original"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccDxConnectionConfig_tagsChanged(connectionName), Check: resource.ComposeTestCheckFunc( - testAccCheckAwsDxConnectionExists("aws_dx_connection.hoge"), - resource.TestCheckResourceAttr("aws_dx_connection.hoge", "name", connectionName), - resource.TestCheckResourceAttr("aws_dx_connection.hoge", "tags.%", "1"), - resource.TestCheckResourceAttr("aws_dx_connection.hoge", "tags.Usage", "changed"), + testAccCheckAwsDxConnectionExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "name", connectionName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.Usage", "changed"), ), }, }, @@ -122,7 +113,7 @@ func testAccCheckAwsDxConnectionExists(name string) resource.TestCheckFunc { func testAccDxConnectionConfig(n string) string { return fmt.Sprintf(` -resource "aws_dx_connection" "hoge" { +resource "aws_dx_connection" "test" { name = "%s" bandwidth = "1Gbps" location = "EqSe2" @@ -132,7 +123,7 @@ resource "aws_dx_connection" "hoge" { func testAccDxConnectionConfig_tags(n string) string { return 
fmt.Sprintf(` -resource "aws_dx_connection" "hoge" { +resource "aws_dx_connection" "test" { name = "%s" bandwidth = "1Gbps" location = "EqSe2" @@ -147,7 +138,7 @@ resource "aws_dx_connection" "hoge" { func testAccDxConnectionConfig_tagsChanged(n string) string { return fmt.Sprintf(` -resource "aws_dx_connection" "hoge" { +resource "aws_dx_connection" "test" { name = "%s" bandwidth = "1Gbps" location = "EqSe2" diff --git a/aws/resource_aws_dx_gateway_test.go b/aws/resource_aws_dx_gateway_test.go index 6072379e037..4ad53bb19f8 100644 --- a/aws/resource_aws_dx_gateway_test.go +++ b/aws/resource_aws_dx_gateway_test.go @@ -115,7 +115,7 @@ func testSweepDirectConnectGateways(region string) error { return nil } -func TestAccAwsDxGateway_importBasic(t *testing.T) { +func TestAccAwsDxGateway_basic(t *testing.T) { resourceName := "aws_dx_gateway.test" resource.ParallelTest(t, resource.TestCase{ @@ -125,8 +125,11 @@ func TestAccAwsDxGateway_importBasic(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccDxGatewayConfig(acctest.RandString(5), randIntRange(64512, 65534)), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsDxGatewayExists(resourceName), + testAccCheckResourceAttrAccountID(resourceName, "owner_account_id"), + ), }, - { ResourceName: resourceName, ImportState: true, @@ -136,7 +139,7 @@ func TestAccAwsDxGateway_importBasic(t *testing.T) { }) } -func TestAccAwsDxGateway_importComplex(t *testing.T) { +func TestAccAwsDxGateway_complex(t *testing.T) { checkFn := func(s []*terraform.InstanceState) error { if len(s) != 3 { return fmt.Errorf("Got %d resources, expected 3. State: %#v", len(s), s) @@ -147,6 +150,7 @@ func TestAccAwsDxGateway_importComplex(t *testing.T) { rName1 := fmt.Sprintf("terraform-testacc-dxgwassoc-%d", acctest.RandInt()) rName2 := fmt.Sprintf("terraform-testacc-dxgwassoc-%d", acctest.RandInt()) rBgpAsn := randIntRange(64512, 65534) + resourceName := "aws_dx_gateway.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -155,10 +159,13 @@ func TestAccAwsDxGateway_importComplex(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccDxGatewayAssociationConfig_multiVpnGatewaysSingleAccount(rName1, rName2, rBgpAsn), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsDxGatewayExists(resourceName), + testAccCheckResourceAttrAccountID(resourceName, "owner_account_id"), + ), }, - { - ResourceName: "aws_dx_gateway.test", + ResourceName: resourceName, ImportState: true, ImportStateCheck: checkFn, ImportStateVerify: true, @@ -167,23 +174,6 @@ func TestAccAwsDxGateway_importComplex(t *testing.T) { }) } -func TestAccAwsDxGateway_basic(t *testing.T) { - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAwsDxGatewayDestroy, - Steps: []resource.TestStep{ - { - Config: testAccDxGatewayConfig(acctest.RandString(5), randIntRange(64512, 65534)), - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsDxGatewayExists("aws_dx_gateway.test"), - testAccCheckResourceAttrAccountID("aws_dx_gateway.test", "owner_account_id"), - ), - }, - }, - }) -} - func testAccCheckAwsDxGatewayDestroy(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).dxconn diff --git a/aws/resource_aws_dx_lag_test.go b/aws/resource_aws_dx_lag_test.go index 7b8fecf9100..a03c9a0d47f 100644 --- a/aws/resource_aws_dx_lag_test.go +++ b/aws/resource_aws_dx_lag_test.go @@ -11,31 +11,10 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/terraform" ) 
-func TestAccAWSDxLag_importBasic(t *testing.T) { - resourceName := "aws_dx_lag.hoge" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAwsDxLagDestroy, - Steps: []resource.TestStep{ - { - Config: testAccDxLagConfig(acctest.RandString(5)), - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"force_destroy"}, - }, - }, - }) -} - func TestAccAWSDxLag_basic(t *testing.T) { lagName1 := fmt.Sprintf("tf-dx-lag-%s", acctest.RandString(5)) lagName2 := fmt.Sprintf("tf-dx-lag-%s", acctest.RandString(5)) + resourceName := "aws_dx_lag.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -45,21 +24,27 @@ func TestAccAWSDxLag_basic(t *testing.T) { { Config: testAccDxLagConfig(lagName1), Check: resource.ComposeTestCheckFunc( - testAccCheckAwsDxLagExists("aws_dx_lag.hoge"), - resource.TestCheckResourceAttr("aws_dx_lag.hoge", "name", lagName1), - resource.TestCheckResourceAttr("aws_dx_lag.hoge", "connections_bandwidth", "1Gbps"), - resource.TestCheckResourceAttr("aws_dx_lag.hoge", "location", "EqSe2"), - resource.TestCheckResourceAttr("aws_dx_lag.hoge", "tags.%", "0"), + testAccCheckAwsDxLagExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "name", lagName1), + resource.TestCheckResourceAttr(resourceName, "connections_bandwidth", "1Gbps"), + resource.TestCheckResourceAttr(resourceName, "location", "EqSe2"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, { Config: testAccDxLagConfig(lagName2), Check: resource.ComposeTestCheckFunc( - testAccCheckAwsDxLagExists("aws_dx_lag.hoge"), - resource.TestCheckResourceAttr("aws_dx_lag.hoge", "name", lagName2), - resource.TestCheckResourceAttr("aws_dx_lag.hoge", "connections_bandwidth", "1Gbps"), - resource.TestCheckResourceAttr("aws_dx_lag.hoge", "location", "EqSe2"), - resource.TestCheckResourceAttr("aws_dx_lag.hoge", "tags.%", "0"), + testAccCheckAwsDxLagExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "name", lagName2), + resource.TestCheckResourceAttr(resourceName, "connections_bandwidth", "1Gbps"), + resource.TestCheckResourceAttr(resourceName, "location", "EqSe2"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), }, }, @@ -68,6 +53,7 @@ func TestAccAWSDxLag_basic(t *testing.T) { func TestAccAWSDxLag_tags(t *testing.T) { lagName := fmt.Sprintf("tf-dx-lag-%s", acctest.RandString(5)) + resourceName := "aws_dx_lag.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -77,27 +63,33 @@ func TestAccAWSDxLag_tags(t *testing.T) { { Config: testAccDxLagConfig_tags(lagName), Check: resource.ComposeTestCheckFunc( - testAccCheckAwsDxLagExists("aws_dx_lag.hoge"), - resource.TestCheckResourceAttr("aws_dx_lag.hoge", "name", lagName), - resource.TestCheckResourceAttr("aws_dx_lag.hoge", "tags.%", "2"), - resource.TestCheckResourceAttr("aws_dx_lag.hoge", "tags.Usage", "original"), + testAccCheckAwsDxLagExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "name", lagName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.Usage", "original"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + 
ImportStateVerifyIgnore: []string{"force_destroy"}, + }, { Config: testAccDxLagConfig_tagsChanged(lagName), Check: resource.ComposeTestCheckFunc( - testAccCheckAwsDxLagExists("aws_dx_lag.hoge"), - resource.TestCheckResourceAttr("aws_dx_lag.hoge", "name", lagName), - resource.TestCheckResourceAttr("aws_dx_lag.hoge", "tags.%", "1"), - resource.TestCheckResourceAttr("aws_dx_lag.hoge", "tags.Usage", "changed"), + testAccCheckAwsDxLagExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "name", lagName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.Usage", "changed"), ), }, { Config: testAccDxLagConfig(lagName), Check: resource.ComposeTestCheckFunc( - testAccCheckAwsDxLagExists("aws_dx_lag.hoge"), - resource.TestCheckResourceAttr("aws_dx_lag.hoge", "name", lagName), - resource.TestCheckResourceAttr("aws_dx_lag.hoge", "tags.%", "0"), + testAccCheckAwsDxLagExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "name", lagName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), }, }, @@ -143,7 +135,7 @@ func testAccCheckAwsDxLagExists(name string) resource.TestCheckFunc { func testAccDxLagConfig(n string) string { return fmt.Sprintf(` -resource "aws_dx_lag" "hoge" { +resource "aws_dx_lag" "test" { name = "%s" connections_bandwidth = "1Gbps" location = "EqSe2" @@ -154,7 +146,7 @@ resource "aws_dx_lag" "hoge" { func testAccDxLagConfig_tags(n string) string { return fmt.Sprintf(` -resource "aws_dx_lag" "hoge" { +resource "aws_dx_lag" "test" { name = "%s" connections_bandwidth = "1Gbps" location = "EqSe2" @@ -170,7 +162,7 @@ resource "aws_dx_lag" "hoge" { func testAccDxLagConfig_tagsChanged(n string) string { return fmt.Sprintf(` -resource "aws_dx_lag" "hoge" { +resource "aws_dx_lag" "test" { name = "%s" connections_bandwidth = "1Gbps" location = "EqSe2" From 431b6cd7dc6732a69a6b0a5760be6118b6a034b0 Mon Sep 17 00:00:00 2001 From: Ryn Daniels Date: Mon, 21 Oct 2019 15:09:12 +0200 Subject: [PATCH 11/55] Import test refactor for directory service directory --- ...ce_aws_directory_service_directory_test.go | 230 ++++++++++-------- 1 file changed, 131 insertions(+), 99 deletions(-) diff --git a/aws/resource_aws_directory_service_directory_test.go b/aws/resource_aws_directory_service_directory_test.go index 6399577fe03..7060e76838b 100644 --- a/aws/resource_aws_directory_service_directory_test.go +++ b/aws/resource_aws_directory_service_directory_test.go @@ -80,23 +80,23 @@ func TestDiffTagsDirectoryService(t *testing.T) { // Basic add/remove { Old: map[string]interface{}{ - "foo": "bar", + "foo": "test", }, New: map[string]interface{}{ - "bar": "baz", + "test": "baz", }, Create: map[string]string{ - "bar": "baz", + "test": "baz", }, Remove: map[string]string{ - "foo": "bar", + "foo": "test", }, }, // Modify { Old: map[string]interface{}{ - "foo": "bar", + "foo": "test", }, New: map[string]interface{}{ "foo": "baz", @@ -105,7 +105,7 @@ func TestDiffTagsDirectoryService(t *testing.T) { "foo": "baz", }, Remove: map[string]string{ - "foo": "bar", + "foo": "test", }, }, } @@ -123,8 +123,8 @@ func TestDiffTagsDirectoryService(t *testing.T) { } } -func TestAccAWSDirectoryServiceDirectory_importBasic(t *testing.T) { - resourceName := "aws_directory_service_directory.bar" +func TestAccAWSDirectoryServiceDirectory_basic(t *testing.T) { + resourceName := "aws_directory_service_directory.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { @@ -137,6 +137,10 @@ func 
TestAccAWSDirectoryServiceDirectory_importBasic(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccDirectoryServiceDirectoryConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckServiceDirectoryExists("aws_directory_service_directory.test"), + resource.TestCheckResourceAttrSet("aws_directory_service_directory.test", "security_group_id"), + ), }, { ResourceName: resourceName, @@ -150,28 +154,9 @@ func TestAccAWSDirectoryServiceDirectory_importBasic(t *testing.T) { }) } -func TestAccAWSDirectoryServiceDirectory_basic(t *testing.T) { - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { - testAccPreCheck(t) - testAccPreCheckAWSDirectoryService(t) - testAccPreCheckAWSDirectoryServiceSimpleDirectory(t) - }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDirectoryServiceDirectoryDestroy, - Steps: []resource.TestStep{ - { - Config: testAccDirectoryServiceDirectoryConfig, - Check: resource.ComposeTestCheckFunc( - testAccCheckServiceDirectoryExists("aws_directory_service_directory.bar"), - resource.TestCheckResourceAttrSet("aws_directory_service_directory.bar", "security_group_id"), - ), - }, - }, - }) -} - func TestAccAWSDirectoryServiceDirectory_tags(t *testing.T) { + resourceName := "aws_directory_service_directory.test" + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) @@ -184,28 +169,36 @@ func TestAccAWSDirectoryServiceDirectory_tags(t *testing.T) { { Config: testAccDirectoryServiceDirectoryTagsConfig, Check: resource.ComposeTestCheckFunc( - testAccCheckServiceDirectoryExists("aws_directory_service_directory.bar"), - resource.TestCheckResourceAttr("aws_directory_service_directory.bar", "tags.%", "2"), - resource.TestCheckResourceAttr("aws_directory_service_directory.bar", "tags.foo", "bar"), - resource.TestCheckResourceAttr("aws_directory_service_directory.bar", "tags.project", "test"), + testAccCheckServiceDirectoryExists("aws_directory_service_directory.test"), + resource.TestCheckResourceAttr("aws_directory_service_directory.test", "tags.%", "2"), + resource.TestCheckResourceAttr("aws_directory_service_directory.test", "tags.foo", "test"), + resource.TestCheckResourceAttr("aws_directory_service_directory.test", "tags.project", "test"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "password", + }, + }, { Config: testAccDirectoryServiceDirectoryUpdateTagsConfig, Check: resource.ComposeTestCheckFunc( - testAccCheckServiceDirectoryExists("aws_directory_service_directory.bar"), - resource.TestCheckResourceAttr("aws_directory_service_directory.bar", "tags.%", "3"), - resource.TestCheckResourceAttr("aws_directory_service_directory.bar", "tags.foo", "bar"), - resource.TestCheckResourceAttr("aws_directory_service_directory.bar", "tags.project", "test2"), - resource.TestCheckResourceAttr("aws_directory_service_directory.bar", "tags.fizz", "buzz"), + testAccCheckServiceDirectoryExists("aws_directory_service_directory.test"), + resource.TestCheckResourceAttr("aws_directory_service_directory.test", "tags.%", "3"), + resource.TestCheckResourceAttr("aws_directory_service_directory.test", "tags.foo", "test"), + resource.TestCheckResourceAttr("aws_directory_service_directory.test", "tags.project", "test2"), + resource.TestCheckResourceAttr("aws_directory_service_directory.test", "tags.fizz", "buzz"), ), }, { Config: testAccDirectoryServiceDirectoryRemoveTagsConfig, Check: resource.ComposeTestCheckFunc( - 
testAccCheckServiceDirectoryExists("aws_directory_service_directory.bar"), - resource.TestCheckResourceAttr("aws_directory_service_directory.bar", "tags.%", "1"), - resource.TestCheckResourceAttr("aws_directory_service_directory.bar", "tags.foo", "bar"), + testAccCheckServiceDirectoryExists("aws_directory_service_directory.test"), + resource.TestCheckResourceAttr("aws_directory_service_directory.test", "tags.%", "1"), + resource.TestCheckResourceAttr("aws_directory_service_directory.test", "tags.foo", "test"), ), }, }, @@ -213,6 +206,8 @@ func TestAccAWSDirectoryServiceDirectory_tags(t *testing.T) { } func TestAccAWSDirectoryServiceDirectory_microsoft(t *testing.T) { + resourceName := "aws_directory_service_directory.test" + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSDirectoryService(t) }, Providers: testAccProviders, @@ -221,15 +216,25 @@ func TestAccAWSDirectoryServiceDirectory_microsoft(t *testing.T) { { Config: testAccDirectoryServiceDirectoryConfig_microsoft, Check: resource.ComposeTestCheckFunc( - testAccCheckServiceDirectoryExists("aws_directory_service_directory.bar"), - resource.TestCheckResourceAttr("aws_directory_service_directory.bar", "edition", directoryservice.DirectoryEditionEnterprise), + testAccCheckServiceDirectoryExists("aws_directory_service_directory.test"), + resource.TestCheckResourceAttr("aws_directory_service_directory.test", "edition", directoryservice.DirectoryEditionEnterprise), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "password", + }, + }, }, }) } func TestAccAWSDirectoryServiceDirectory_microsoftStandard(t *testing.T) { + resourceName := "aws_directory_service_directory.test" + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSDirectoryService(t) }, Providers: testAccProviders, @@ -238,15 +243,25 @@ func TestAccAWSDirectoryServiceDirectory_microsoftStandard(t *testing.T) { { Config: testAccDirectoryServiceDirectoryConfig_microsoftStandard, Check: resource.ComposeTestCheckFunc( - testAccCheckServiceDirectoryExists("aws_directory_service_directory.bar"), - resource.TestCheckResourceAttr("aws_directory_service_directory.bar", "edition", directoryservice.DirectoryEditionStandard), + testAccCheckServiceDirectoryExists("aws_directory_service_directory.test"), + resource.TestCheckResourceAttr("aws_directory_service_directory.test", "edition", directoryservice.DirectoryEditionStandard), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "password", + }, + }, }, }) } func TestAccAWSDirectoryServiceDirectory_connector(t *testing.T) { + resourceName := "aws_directory_service_directory.test" + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) @@ -263,12 +278,21 @@ func TestAccAWSDirectoryServiceDirectory_connector(t *testing.T) { resource.TestCheckResourceAttrSet("aws_directory_service_directory.connector", "security_group_id"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "password", + }, + }, }, }) } func TestAccAWSDirectoryServiceDirectory_withAliasAndSso(t *testing.T) { alias := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_directory_service_directory.test2" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { @@ -282,25 +306,33 @@ func 
TestAccAWSDirectoryServiceDirectory_withAliasAndSso(t *testing.T) { { Config: testAccDirectoryServiceDirectoryConfig_withAlias(alias), Check: resource.ComposeTestCheckFunc( - testAccCheckServiceDirectoryExists("aws_directory_service_directory.bar_a"), - testAccCheckServiceDirectoryAlias("aws_directory_service_directory.bar_a", alias), - testAccCheckServiceDirectorySso("aws_directory_service_directory.bar_a", false), + testAccCheckServiceDirectoryExists("aws_directory_service_directory.test2"), + testAccCheckServiceDirectoryAlias("aws_directory_service_directory.test2", alias), + testAccCheckServiceDirectorySso("aws_directory_service_directory.test2", false), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "password", + }, + }, { Config: testAccDirectoryServiceDirectoryConfig_withSso(alias), Check: resource.ComposeTestCheckFunc( - testAccCheckServiceDirectoryExists("aws_directory_service_directory.bar_a"), - testAccCheckServiceDirectoryAlias("aws_directory_service_directory.bar_a", alias), - testAccCheckServiceDirectorySso("aws_directory_service_directory.bar_a", true), + testAccCheckServiceDirectoryExists("aws_directory_service_directory.test2"), + testAccCheckServiceDirectoryAlias("aws_directory_service_directory.test2", alias), + testAccCheckServiceDirectorySso("aws_directory_service_directory.test2", true), ), }, { Config: testAccDirectoryServiceDirectoryConfig_withSso_modified(alias), Check: resource.ComposeTestCheckFunc( - testAccCheckServiceDirectoryExists("aws_directory_service_directory.bar_a"), - testAccCheckServiceDirectoryAlias("aws_directory_service_directory.bar_a", alias), - testAccCheckServiceDirectorySso("aws_directory_service_directory.bar_a", false), + testAccCheckServiceDirectoryExists("aws_directory_service_directory.test2"), + testAccCheckServiceDirectoryAlias("aws_directory_service_directory.test2", alias), + testAccCheckServiceDirectorySso("aws_directory_service_directory.test2", false), ), }, }, @@ -471,14 +503,14 @@ data "aws_availability_zones" "available" { state = "available" } -resource "aws_directory_service_directory" "bar" { +resource "aws_directory_service_directory" "test" { name = "corp.notexample.com" password = "SuperSecretPassw0rd" size = "Small" vpc_settings { vpc_id = "${aws_vpc.main.id}" - subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.bar.id}"] + subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.test.id}"] } } @@ -497,12 +529,12 @@ resource "aws_subnet" "foo" { Name = "tf-acc-directory-service-directory-foo" } } -resource "aws_subnet" "bar" { +resource "aws_subnet" "test" { vpc_id = "${aws_vpc.main.id}" availability_zone = "${data.aws_availability_zones.available.names[1]}" cidr_block = "10.0.2.0/24" tags = { - Name = "tf-acc-directory-service-directory-bar" + Name = "tf-acc-directory-service-directory-test" } } ` @@ -512,18 +544,18 @@ data "aws_availability_zones" "available" { state = "available" } -resource "aws_directory_service_directory" "bar" { +resource "aws_directory_service_directory" "test" { name = "corp.notexample.com" password = "SuperSecretPassw0rd" size = "Small" vpc_settings { vpc_id = "${aws_vpc.main.id}" - subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.bar.id}"] + subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.test.id}"] } tags = { - foo = "bar" + foo = "test" project = "test" } } @@ -543,12 +575,12 @@ resource "aws_subnet" "foo" { Name = "tf-acc-directory-service-directory-tags-foo" } } -resource "aws_subnet" "bar" { +resource 
"aws_subnet" "test" { vpc_id = "${aws_vpc.main.id}" availability_zone = "${data.aws_availability_zones.available.names[1]}" cidr_block = "10.0.2.0/24" tags = { - Name = "tf-acc-directory-service-directory-tags-bar" + Name = "tf-acc-directory-service-directory-tags-test" } } ` @@ -558,18 +590,18 @@ data "aws_availability_zones" "available" { state = "available" } -resource "aws_directory_service_directory" "bar" { +resource "aws_directory_service_directory" "test" { name = "corp.notexample.com" password = "SuperSecretPassw0rd" size = "Small" vpc_settings { vpc_id = "${aws_vpc.main.id}" - subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.bar.id}"] + subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.test.id}"] } tags = { - foo = "bar" + foo = "test" project = "test2" fizz = "buzz" } @@ -590,12 +622,12 @@ resource "aws_subnet" "foo" { Name = "tf-acc-directory-service-directory-tags-foo" } } -resource "aws_subnet" "bar" { +resource "aws_subnet" "test" { vpc_id = "${aws_vpc.main.id}" availability_zone = "${data.aws_availability_zones.available.names[1]}" cidr_block = "10.0.2.0/24" tags = { - Name = "tf-acc-directory-service-directory-tags-bar" + Name = "tf-acc-directory-service-directory-tags-test" } } ` @@ -605,18 +637,18 @@ data "aws_availability_zones" "available" { state = "available" } -resource "aws_directory_service_directory" "bar" { +resource "aws_directory_service_directory" "test" { name = "corp.notexample.com" password = "SuperSecretPassw0rd" size = "Small" vpc_settings { vpc_id = "${aws_vpc.main.id}" - subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.bar.id}"] + subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.test.id}"] } tags = { - foo = "bar" + foo = "test" } } @@ -635,12 +667,12 @@ resource "aws_subnet" "foo" { Name = "tf-acc-directory-service-directory-tags-foo" } } -resource "aws_subnet" "bar" { +resource "aws_subnet" "test" { vpc_id = "${aws_vpc.main.id}" availability_zone = "${data.aws_availability_zones.available.names[1]}" cidr_block = "10.0.2.0/24" tags = { - Name = "tf-acc-directory-service-directory-tags-bar" + Name = "tf-acc-directory-service-directory-tags-test" } } ` @@ -650,14 +682,14 @@ data "aws_availability_zones" "available" { state = "available" } -resource "aws_directory_service_directory" "bar" { +resource "aws_directory_service_directory" "test" { name = "corp.notexample.com" password = "SuperSecretPassw0rd" size = "Small" vpc_settings { vpc_id = "${aws_vpc.main.id}" - subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.bar.id}"] + subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.test.id}"] } } @@ -668,10 +700,10 @@ resource "aws_directory_service_directory" "connector" { type = "ADConnector" connect_settings { - customer_dns_ips = aws_directory_service_directory.bar.dns_ip_addresses + customer_dns_ips = aws_directory_service_directory.test.dns_ip_addresses customer_username = "Administrator" vpc_id = "${aws_vpc.main.id}" - subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.bar.id}"] + subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.test.id}"] } } @@ -690,12 +722,12 @@ resource "aws_subnet" "foo" { Name = "tf-acc-directory-service-directory-connector-foo" } } -resource "aws_subnet" "bar" { +resource "aws_subnet" "test" { vpc_id = "${aws_vpc.main.id}" availability_zone = "${data.aws_availability_zones.available.names[1]}" cidr_block = "10.0.2.0/24" tags = { - Name = "tf-acc-directory-service-directory-connector-bar" + Name = "tf-acc-directory-service-directory-connector-test" } } ` @@ -705,14 +737,14 @@ data "aws_availability_zones" 
"available" { state = "available" } -resource "aws_directory_service_directory" "bar" { +resource "aws_directory_service_directory" "test" { name = "corp.notexample.com" password = "SuperSecretPassw0rd" type = "MicrosoftAD" vpc_settings { vpc_id = "${aws_vpc.main.id}" - subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.bar.id}"] + subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.test.id}"] } } @@ -731,12 +763,12 @@ resource "aws_subnet" "foo" { Name = "tf-acc-directory-service-directory-microsoft-foo" } } -resource "aws_subnet" "bar" { +resource "aws_subnet" "test" { vpc_id = "${aws_vpc.main.id}" availability_zone = "${data.aws_availability_zones.available.names[1]}" cidr_block = "10.0.2.0/24" tags = { - Name = "tf-acc-directory-service-directory-microsoft-bar" + Name = "tf-acc-directory-service-directory-microsoft-test" } } ` @@ -746,7 +778,7 @@ data "aws_availability_zones" "available" { state = "available" } -resource "aws_directory_service_directory" "bar" { +resource "aws_directory_service_directory" "test" { name = "corp.notexample.com" password = "SuperSecretPassw0rd" type = "MicrosoftAD" @@ -754,7 +786,7 @@ resource "aws_directory_service_directory" "bar" { vpc_settings { vpc_id = "${aws_vpc.main.id}" - subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.bar.id}"] + subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.test.id}"] } } @@ -773,12 +805,12 @@ resource "aws_subnet" "foo" { Name = "tf-acc-directory-service-directory-microsoft-foo" } } -resource "aws_subnet" "bar" { +resource "aws_subnet" "test" { vpc_id = "${aws_vpc.main.id}" availability_zone = "${data.aws_availability_zones.available.names[1]}" cidr_block = "10.0.2.0/24" tags = { - Name = "tf-acc-directory-service-directory-microsoft-bar" + Name = "tf-acc-directory-service-directory-microsoft-test" } } ` @@ -789,7 +821,7 @@ data "aws_availability_zones" "available" { state = "available" } -resource "aws_directory_service_directory" "bar_a" { +resource "aws_directory_service_directory" "test2" { name = "corp.notexample.com" password = "SuperSecretPassw0rd" size = "Small" @@ -797,7 +829,7 @@ resource "aws_directory_service_directory" "bar_a" { vpc_settings { vpc_id = "${aws_vpc.main.id}" - subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.bar.id}"] + subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.test.id}"] } } @@ -816,12 +848,12 @@ resource "aws_subnet" "foo" { Name = "tf-acc-directory-service-directory-with-alias-foo" } } -resource "aws_subnet" "bar" { +resource "aws_subnet" "test" { vpc_id = "${aws_vpc.main.id}" availability_zone = "${data.aws_availability_zones.available.names[1]}" cidr_block = "10.0.2.0/24" tags = { - Name = "tf-acc-directory-service-directory-with-alias-bar" + Name = "tf-acc-directory-service-directory-with-alias-test" } } `, alias) @@ -833,7 +865,7 @@ data "aws_availability_zones" "available" { state = "available" } -resource "aws_directory_service_directory" "bar_a" { +resource "aws_directory_service_directory" "test2" { name = "corp.notexample.com" password = "SuperSecretPassw0rd" size = "Small" @@ -842,7 +874,7 @@ resource "aws_directory_service_directory" "bar_a" { vpc_settings { vpc_id = "${aws_vpc.main.id}" - subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.bar.id}"] + subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.test.id}"] } } @@ -861,12 +893,12 @@ resource "aws_subnet" "foo" { Name = "tf-acc-directory-service-directory-with-sso-foo" } } -resource "aws_subnet" "bar" { +resource "aws_subnet" "test" { vpc_id = "${aws_vpc.main.id}" availability_zone = 
"${data.aws_availability_zones.available.names[1]}" cidr_block = "10.0.2.0/24" tags = { - Name = "tf-acc-directory-service-directory-with-sso-bar" + Name = "tf-acc-directory-service-directory-with-sso-test" } } `, alias) @@ -878,7 +910,7 @@ data "aws_availability_zones" "available" { state = "available" } -resource "aws_directory_service_directory" "bar_a" { +resource "aws_directory_service_directory" "test2" { name = "corp.notexample.com" password = "SuperSecretPassw0rd" size = "Small" @@ -887,7 +919,7 @@ resource "aws_directory_service_directory" "bar_a" { vpc_settings { vpc_id = "${aws_vpc.main.id}" - subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.bar.id}"] + subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.test.id}"] } } @@ -906,12 +938,12 @@ resource "aws_subnet" "foo" { Name = "tf-acc-directory-service-directory-with-sso-foo" } } -resource "aws_subnet" "bar" { +resource "aws_subnet" "test" { vpc_id = "${aws_vpc.main.id}" availability_zone = "${data.aws_availability_zones.available.names[1]}" cidr_block = "10.0.2.0/24" tags = { - Name = "tf-acc-directory-service-directory-with-sso-bar" + Name = "tf-acc-directory-service-directory-with-sso-test" } } `, alias) From f9c1b64f850c7d690442cc0ae1444798d148aa3e Mon Sep 17 00:00:00 2001 From: Ryn Daniels Date: Mon, 21 Oct 2019 17:54:32 +0200 Subject: [PATCH 12/55] Import test refactor for db resources --- ...resource_aws_db_event_subscription_test.go | 136 ++--- aws/resource_aws_db_parameter_group_test.go | 574 +++++++++--------- aws/resource_aws_db_security_group_test.go | 51 +- aws/resource_aws_db_subnet_group_test.go | 102 ++-- 4 files changed, 425 insertions(+), 438 deletions(-) diff --git a/aws/resource_aws_db_event_subscription_test.go b/aws/resource_aws_db_event_subscription_test.go index a8643a3c4f5..102b68c7d47 100644 --- a/aws/resource_aws_db_event_subscription_test.go +++ b/aws/resource_aws_db_event_subscription_test.go @@ -13,9 +13,10 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/terraform" ) -func TestAccAWSDBEventSubscription_importBasic(t *testing.T) { - resourceName := "aws_db_event_subscription.bar" +func TestAccAWSDBEventSubscription_basicUpdate(t *testing.T) { + var v rds.EventSubscription rInt := acctest.RandInt() + resourceName := "aws_db_event_subscription.test" subscriptionName := fmt.Sprintf("tf-acc-test-rds-event-subs-%d", rInt) resource.ParallelTest(t, resource.TestCase{ @@ -25,48 +26,30 @@ func TestAccAWSDBEventSubscription_importBasic(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccAWSDBEventSubscriptionConfig(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSDBEventSubscriptionExists(resourceName, &v), + resource.TestMatchResourceAttr(resourceName, "arn", regexp.MustCompile(fmt.Sprintf("^arn:[^:]+:rds:[^:]+:[^:]+:es:%s$", subscriptionName))), + resource.TestCheckResourceAttr(resourceName, "enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "source_type", "db-instance"), + resource.TestCheckResourceAttr(resourceName, "name", subscriptionName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.Name", "name"), + ), }, - { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, ImportStateId: subscriptionName, }, - }, - }) -} - -func TestAccAWSDBEventSubscription_basicUpdate(t *testing.T) { - var v rds.EventSubscription - rInt := acctest.RandInt() - rName := fmt.Sprintf("tf-acc-test-rds-event-subs-%d", rInt) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { 
testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDBEventSubscriptionDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSDBEventSubscriptionConfig(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBEventSubscriptionExists("aws_db_event_subscription.bar", &v), - resource.TestMatchResourceAttr("aws_db_event_subscription.bar", "arn", regexp.MustCompile(fmt.Sprintf("^arn:[^:]+:rds:[^:]+:[^:]+:es:%s$", rName))), - resource.TestCheckResourceAttr("aws_db_event_subscription.bar", "enabled", "true"), - resource.TestCheckResourceAttr("aws_db_event_subscription.bar", "source_type", "db-instance"), - resource.TestCheckResourceAttr("aws_db_event_subscription.bar", "name", rName), - resource.TestCheckResourceAttr("aws_db_event_subscription.bar", "tags.%", "1"), - resource.TestCheckResourceAttr("aws_db_event_subscription.bar", "tags.Name", "name"), - ), - }, { Config: testAccAWSDBEventSubscriptionConfigUpdate(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBEventSubscriptionExists("aws_db_event_subscription.bar", &v), - resource.TestCheckResourceAttr("aws_db_event_subscription.bar", "enabled", "false"), - resource.TestCheckResourceAttr("aws_db_event_subscription.bar", "source_type", "db-parameter-group"), - resource.TestCheckResourceAttr("aws_db_event_subscription.bar", "tags.%", "1"), - resource.TestCheckResourceAttr("aws_db_event_subscription.bar", "tags.Name", "new-name"), + testAccCheckAWSDBEventSubscriptionExists(resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "enabled", "false"), + resource.TestCheckResourceAttr(resourceName, "source_type", "db-parameter-group"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.Name", "new-name"), ), }, }, @@ -76,7 +59,7 @@ func TestAccAWSDBEventSubscription_basicUpdate(t *testing.T) { func TestAccAWSDBEventSubscription_disappears(t *testing.T) { var eventSubscription rds.EventSubscription rInt := acctest.RandInt() - resourceName := "aws_db_event_subscription.bar" + resourceName := "aws_db_event_subscription.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -99,6 +82,7 @@ func TestAccAWSDBEventSubscription_withPrefix(t *testing.T) { var v rds.EventSubscription rInt := acctest.RandInt() startsWithPrefix := regexp.MustCompile("^tf-acc-test-rds-event-subs-") + resourceName := "aws_db_event_subscription.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -108,15 +92,15 @@ func TestAccAWSDBEventSubscription_withPrefix(t *testing.T) { { Config: testAccAWSDBEventSubscriptionConfigWithPrefix(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBEventSubscriptionExists("aws_db_event_subscription.bar", &v), + testAccCheckAWSDBEventSubscriptionExists(resourceName, &v), resource.TestCheckResourceAttr( - "aws_db_event_subscription.bar", "enabled", "true"), + resourceName, "enabled", "true"), resource.TestCheckResourceAttr( - "aws_db_event_subscription.bar", "source_type", "db-instance"), + resourceName, "source_type", "db-instance"), resource.TestMatchResourceAttr( - "aws_db_event_subscription.bar", "name", startsWithPrefix), + resourceName, "name", startsWithPrefix), resource.TestCheckResourceAttr( - "aws_db_event_subscription.bar", "tags.Name", "name"), + resourceName, "tags.Name", "name"), ), }, }, @@ -126,6 +110,8 @@ func TestAccAWSDBEventSubscription_withPrefix(t *testing.T) { func 
TestAccAWSDBEventSubscription_withSourceIds(t *testing.T) { var v rds.EventSubscription rInt := acctest.RandInt() + resourceName := "aws_db_event_subscription.test" + subscriptionName := fmt.Sprintf("tf-acc-test-rds-event-subs-with-ids-%d", rInt) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -135,29 +121,35 @@ func TestAccAWSDBEventSubscription_withSourceIds(t *testing.T) { { Config: testAccAWSDBEventSubscriptionConfigWithSourceIds(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBEventSubscriptionExists("aws_db_event_subscription.bar", &v), + testAccCheckAWSDBEventSubscriptionExists(resourceName, &v), resource.TestCheckResourceAttr( - "aws_db_event_subscription.bar", "enabled", "true"), + resourceName, "enabled", "true"), resource.TestCheckResourceAttr( - "aws_db_event_subscription.bar", "source_type", "db-parameter-group"), + resourceName, "source_type", "db-parameter-group"), resource.TestCheckResourceAttr( - "aws_db_event_subscription.bar", "name", fmt.Sprintf("tf-acc-test-rds-event-subs-with-ids-%d", rInt)), + resourceName, "name", fmt.Sprintf("tf-acc-test-rds-event-subs-with-ids-%d", rInt)), resource.TestCheckResourceAttr( - "aws_db_event_subscription.bar", "source_ids.#", "1"), + resourceName, "source_ids.#", "1"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateId: subscriptionName, + }, { Config: testAccAWSDBEventSubscriptionConfigUpdateSourceIds(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBEventSubscriptionExists("aws_db_event_subscription.bar", &v), + testAccCheckAWSDBEventSubscriptionExists(resourceName, &v), resource.TestCheckResourceAttr( - "aws_db_event_subscription.bar", "enabled", "true"), + resourceName, "enabled", "true"), resource.TestCheckResourceAttr( - "aws_db_event_subscription.bar", "source_type", "db-parameter-group"), + resourceName, "source_type", "db-parameter-group"), resource.TestCheckResourceAttr( - "aws_db_event_subscription.bar", "name", fmt.Sprintf("tf-acc-test-rds-event-subs-with-ids-%d", rInt)), + resourceName, "name", fmt.Sprintf("tf-acc-test-rds-event-subs-with-ids-%d", rInt)), resource.TestCheckResourceAttr( - "aws_db_event_subscription.bar", "source_ids.#", "2"), + resourceName, "source_ids.#", "2"), ), }, }, @@ -167,6 +159,8 @@ func TestAccAWSDBEventSubscription_withSourceIds(t *testing.T) { func TestAccAWSDBEventSubscription_categoryUpdate(t *testing.T) { var v rds.EventSubscription rInt := acctest.RandInt() + resourceName := "aws_db_event_subscription.test" + subscriptionName := fmt.Sprintf("tf-acc-test-rds-event-subs-%d", rInt) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -176,23 +170,29 @@ func TestAccAWSDBEventSubscription_categoryUpdate(t *testing.T) { { Config: testAccAWSDBEventSubscriptionConfig(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBEventSubscriptionExists("aws_db_event_subscription.bar", &v), + testAccCheckAWSDBEventSubscriptionExists(resourceName, &v), resource.TestCheckResourceAttr( - "aws_db_event_subscription.bar", "enabled", "true"), + resourceName, "enabled", "true"), resource.TestCheckResourceAttr( - "aws_db_event_subscription.bar", "source_type", "db-instance"), + resourceName, "source_type", "db-instance"), resource.TestCheckResourceAttr( - "aws_db_event_subscription.bar", "name", fmt.Sprintf("tf-acc-test-rds-event-subs-%d", rInt)), + resourceName, "name", fmt.Sprintf("tf-acc-test-rds-event-subs-%d", rInt)), ), }, + { + ResourceName: 
resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateId: subscriptionName, + }, { Config: testAccAWSDBEventSubscriptionConfigUpdateCategories(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBEventSubscriptionExists("aws_db_event_subscription.bar", &v), + testAccCheckAWSDBEventSubscriptionExists(resourceName, &v), resource.TestCheckResourceAttr( - "aws_db_event_subscription.bar", "enabled", "true"), + resourceName, "enabled", "true"), resource.TestCheckResourceAttr( - "aws_db_event_subscription.bar", "source_type", "db-instance"), + resourceName, "source_type", "db-instance"), ), }, }, @@ -278,7 +278,7 @@ resource "aws_sns_topic" "aws_sns_topic" { name = "tf-acc-test-rds-event-subs-sns-topic-%d" } -resource "aws_db_event_subscription" "bar" { +resource "aws_db_event_subscription" "test" { name = "tf-acc-test-rds-event-subs-%d" sns_topic = "${aws_sns_topic.aws_sns_topic.arn}" source_type = "db-instance" @@ -304,7 +304,7 @@ resource "aws_sns_topic" "aws_sns_topic" { name = "tf-acc-test-rds-event-subs-sns-topic-%d" } -resource "aws_db_event_subscription" "bar" { +resource "aws_db_event_subscription" "test" { name_prefix = "tf-acc-test-rds-event-subs-" sns_topic = "${aws_sns_topic.aws_sns_topic.arn}" source_type = "db-instance" @@ -330,7 +330,7 @@ resource "aws_sns_topic" "aws_sns_topic" { name = "tf-acc-test-rds-event-subs-sns-topic-%d" } -resource "aws_db_event_subscription" "bar" { +resource "aws_db_event_subscription" "test" { name = "tf-acc-test-rds-event-subs-%d" sns_topic = "${aws_sns_topic.aws_sns_topic.arn}" enabled = false @@ -353,17 +353,17 @@ resource "aws_sns_topic" "aws_sns_topic" { name = "tf-acc-test-rds-event-subs-sns-topic-%d" } -resource "aws_db_parameter_group" "bar" { +resource "aws_db_parameter_group" "test" { name = "db-parameter-group-event-%d" family = "mysql5.6" description = "Test parameter group for terraform" } -resource "aws_db_event_subscription" "bar" { +resource "aws_db_event_subscription" "test" { name = "tf-acc-test-rds-event-subs-with-ids-%d" sns_topic = "${aws_sns_topic.aws_sns_topic.arn}" source_type = "db-parameter-group" - source_ids = ["${aws_db_parameter_group.bar.id}"] + source_ids = ["${aws_db_parameter_group.test.id}"] event_categories = [ "configuration change", @@ -382,23 +382,23 @@ resource "aws_sns_topic" "aws_sns_topic" { name = "tf-acc-test-rds-event-subs-sns-topic-%d" } -resource "aws_db_parameter_group" "bar" { +resource "aws_db_parameter_group" "test" { name = "db-parameter-group-event-%d" family = "mysql5.6" description = "Test parameter group for terraform" } -resource "aws_db_parameter_group" "foo" { +resource "aws_db_parameter_group" "test2" { name = "db-parameter-group-event-2-%d" family = "mysql5.6" description = "Test parameter group for terraform" } -resource "aws_db_event_subscription" "bar" { +resource "aws_db_event_subscription" "test" { name = "tf-acc-test-rds-event-subs-with-ids-%d" sns_topic = "${aws_sns_topic.aws_sns_topic.arn}" source_type = "db-parameter-group" - source_ids = ["${aws_db_parameter_group.bar.id}", "${aws_db_parameter_group.foo.id}"] + source_ids = ["${aws_db_parameter_group.test.id}", "${aws_db_parameter_group.test2.id}"] event_categories = [ "configuration change", @@ -417,7 +417,7 @@ resource "aws_sns_topic" "aws_sns_topic" { name = "tf-acc-test-rds-event-subs-sns-topic-%d" } -resource "aws_db_event_subscription" "bar" { +resource "aws_db_event_subscription" "test" { name = "tf-acc-test-rds-event-subs-%d" sns_topic = "${aws_sns_topic.aws_sns_topic.arn}" source_type = 
"db-instance" diff --git a/aws/resource_aws_db_parameter_group_test.go b/aws/resource_aws_db_parameter_group_test.go index 1ea7bc1795e..c665b5b20c1 100644 --- a/aws/resource_aws_db_parameter_group_test.go +++ b/aws/resource_aws_db_parameter_group_test.go @@ -73,231 +73,9 @@ func testSweepRdsDbParameterGroups(region string) error { return nil } -func TestAccAWSDBParameterGroup_importBasic(t *testing.T) { - resourceName := "aws_db_parameter_group.bar" - groupName := fmt.Sprintf("parameter-group-test-terraform-%d", acctest.RandInt()) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDBParameterGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSDBParameterGroupConfig(groupName), - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccAWSDBParameterGroup_limit(t *testing.T) { - var v rds.DBParameterGroup - - groupName := fmt.Sprintf("parameter-group-test-terraform-%d", acctest.RandInt()) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDBParameterGroupDestroy, - Steps: []resource.TestStep{ - { - Config: createAwsDbParameterGroupsExceedDefaultAwsLimit(groupName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBParameterGroupExists("aws_db_parameter_group.large", &v), - testAccCheckAWSDBParameterGroupAttributes(&v, groupName), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "name", groupName), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "family", "mysql5.6"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "description", "RDS default parameter group: Exceed default AWS parameter group limit of twenty"), - - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2421266705.name", "character_set_server"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2421266705.value", "utf8"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2478663599.name", "character_set_client"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2478663599.value", "utf8"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1680942586.name", "collation_server"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1680942586.value", "utf8_general_ci"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2450940716.name", "collation_connection"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2450940716.value", "utf8_general_ci"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.242489837.name", "join_buffer_size"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.242489837.value", "16777216"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2026669454.name", "key_buffer_size"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2026669454.value", "67108864"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2705275319.name", "max_connections"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2705275319.value", "3200"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.3512697936.name", 
"max_heap_table_size"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.3512697936.value", "67108864"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.780730667.name", "performance_schema"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.780730667.value", "1"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2020346918.name", "performance_schema_users_size"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2020346918.value", "1048576"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1460834103.name", "query_cache_limit"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1460834103.value", "2097152"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.484865451.name", "query_cache_size"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.484865451.value", "67108864"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.255276438.name", "sort_buffer_size"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.255276438.value", "16777216"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2981725119.name", "table_open_cache"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2981725119.value", "4096"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2703661820.name", "tmp_table_size"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2703661820.value", "67108864"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2386583229.name", "binlog_cache_size"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2386583229.value", "131072"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.4012389720.name", "innodb_flush_log_at_trx_commit"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.4012389720.value", "0"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2688783017.name", "innodb_open_files"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2688783017.value", "4000"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.782983977.name", "innodb_read_io_threads"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.782983977.value", "64"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2809980413.name", "innodb_thread_concurrency"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2809980413.value", "0"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.3599115250.name", "innodb_write_io_threads"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.3599115250.value", "64"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2557156277.name", "character_set_connection"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2557156277.value", "utf8"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2475346812.name", "character_set_database"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2475346812.value", "utf8"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", 
"parameter.1986528518.name", "character_set_filesystem"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1986528518.value", "utf8"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1708034931.name", "character_set_results"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1708034931.value", "utf8"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1937131004.name", "event_scheduler"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1937131004.value", "ON"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.3437079877.name", "innodb_buffer_pool_dump_at_shutdown"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.3437079877.value", "1"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1092112861.name", "innodb_file_format"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1092112861.value", "Barracuda"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.615571931.name", "innodb_io_capacity"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.615571931.value", "2000"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1065962799.name", "innodb_io_capacity_max"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1065962799.value", "3000"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1411161182.name", "innodb_lock_wait_timeout"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1411161182.value", "120"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.3133315879.name", "innodb_max_dirty_pages_pct"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.3133315879.value", "90"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.950177639.name", "log_bin_trust_function_creators"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.950177639.value", "1"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.591700516.name", "log_warnings"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.591700516.value", "2"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1918306725.name", "log_output"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1918306725.value", "FILE"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.386204433.name", "max_allowed_packet"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.386204433.value", "1073741824"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1700901269.name", "max_connect_errors"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1700901269.value", "100"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2839701698.name", "query_cache_min_res_unit"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2839701698.value", "512"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.427634017.name", "slow_query_log"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.427634017.value", "1"), - 
resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.881816039.name", "sync_binlog"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.881816039.value", "0"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.748684209.name", "tx_isolation"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.748684209.value", "REPEATABLE-READ"), - ), - }, - { - Config: updateAwsDbParameterGroupsExceedDefaultAwsLimit(groupName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBParameterGroupExists("aws_db_parameter_group.large", &v), - testAccCheckAWSDBParameterGroupAttributes(&v, groupName), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "name", groupName), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "family", "mysql5.6"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "description", "Updated RDS default parameter group: Exceed default AWS parameter group limit of twenty"), - - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2421266705.name", "character_set_server"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2421266705.value", "utf8"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2478663599.name", "character_set_client"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2478663599.value", "utf8"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1680942586.name", "collation_server"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1680942586.value", "utf8_general_ci"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2450940716.name", "collation_connection"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2450940716.value", "utf8_general_ci"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.242489837.name", "join_buffer_size"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.242489837.value", "16777216"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2026669454.name", "key_buffer_size"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2026669454.value", "67108864"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2705275319.name", "max_connections"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2705275319.value", "3200"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.3512697936.name", "max_heap_table_size"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.3512697936.value", "67108864"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.780730667.name", "performance_schema"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.780730667.value", "1"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2020346918.name", "performance_schema_users_size"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2020346918.value", "1048576"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1460834103.name", "query_cache_limit"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1460834103.value", "2097152"), - 
resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.484865451.name", "query_cache_size"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.484865451.value", "67108864"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.255276438.name", "sort_buffer_size"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.255276438.value", "16777216"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2981725119.name", "table_open_cache"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2981725119.value", "4096"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2703661820.name", "tmp_table_size"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2703661820.value", "67108864"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2386583229.name", "binlog_cache_size"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2386583229.value", "131072"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.4012389720.name", "innodb_flush_log_at_trx_commit"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.4012389720.value", "0"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2688783017.name", "innodb_open_files"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2688783017.value", "4000"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.782983977.name", "innodb_read_io_threads"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.782983977.value", "64"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2809980413.name", "innodb_thread_concurrency"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2809980413.value", "0"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.3599115250.name", "innodb_write_io_threads"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.3599115250.value", "64"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2557156277.name", "character_set_connection"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2557156277.value", "utf8"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2475346812.name", "character_set_database"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2475346812.value", "utf8"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1986528518.name", "character_set_filesystem"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1986528518.value", "utf8"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1708034931.name", "character_set_results"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1708034931.value", "utf8"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1937131004.name", "event_scheduler"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1937131004.value", "ON"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.3437079877.name", "innodb_buffer_pool_dump_at_shutdown"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", 
"parameter.3437079877.value", "1"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1092112861.name", "innodb_file_format"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1092112861.value", "Barracuda"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.615571931.name", "innodb_io_capacity"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.615571931.value", "2000"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1065962799.name", "innodb_io_capacity_max"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1065962799.value", "3000"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1411161182.name", "innodb_lock_wait_timeout"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1411161182.value", "120"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.3133315879.name", "innodb_max_dirty_pages_pct"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.3133315879.value", "90"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.950177639.name", "log_bin_trust_function_creators"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.950177639.value", "1"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.591700516.name", "log_warnings"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.591700516.value", "2"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1918306725.name", "log_output"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1918306725.value", "FILE"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.386204433.name", "max_allowed_packet"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.386204433.value", "1073741824"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1700901269.name", "max_connect_errors"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1700901269.value", "100"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2839701698.name", "query_cache_min_res_unit"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2839701698.value", "512"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.427634017.name", "slow_query_log"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.427634017.value", "1"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.881816039.name", "sync_binlog"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.881816039.value", "0"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.748684209.name", "tx_isolation"), - resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.748684209.value", "REPEATABLE-READ"), - ), - }, - }, - }) -} - func TestAccAWSDBParameterGroup_basic(t *testing.T) { var v rds.DBParameterGroup - + resourceName := "aws_db_parameter_group.test" groupName := fmt.Sprintf("parameter-group-test-terraform-%d", acctest.RandInt()) resource.ParallelTest(t, resource.TestCase{ @@ -308,67 +86,277 @@ func TestAccAWSDBParameterGroup_basic(t *testing.T) { { Config: testAccAWSDBParameterGroupConfig(groupName), Check: 
resource.ComposeTestCheckFunc( - testAccCheckAWSDBParameterGroupExists("aws_db_parameter_group.bar", &v), + testAccCheckAWSDBParameterGroupExists(resourceName, &v), testAccCheckAWSDBParameterGroupAttributes(&v, groupName), resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "name", groupName), + resourceName, "name", groupName), resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "family", "mysql5.6"), + resourceName, "family", "mysql5.6"), resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "description", "Managed by Terraform"), + resourceName, "description", "Managed by Terraform"), resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "parameter.1708034931.name", "character_set_results"), + resourceName, "parameter.1708034931.name", "character_set_results"), resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "parameter.1708034931.value", "utf8"), + resourceName, "parameter.1708034931.value", "utf8"), resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "parameter.2421266705.name", "character_set_server"), + resourceName, "parameter.2421266705.name", "character_set_server"), resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "parameter.2421266705.value", "utf8"), + resourceName, "parameter.2421266705.value", "utf8"), resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "parameter.2478663599.name", "character_set_client"), + resourceName, "parameter.2478663599.name", "character_set_client"), resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "parameter.2478663599.value", "utf8"), + resourceName, "parameter.2478663599.value", "utf8"), resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "tags.%", "1"), + resourceName, "tags.%", "1"), resource.TestMatchResourceAttr( - "aws_db_parameter_group.bar", "arn", regexp.MustCompile(fmt.Sprintf("^arn:[^:]+:rds:[^:]+:\\d{12}:pg:%s", groupName))), + resourceName, "arn", regexp.MustCompile(fmt.Sprintf("^arn:[^:]+:rds:[^:]+:\\d{12}:pg:%s", groupName))), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSDBParameterGroupAddParametersConfig(groupName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBParameterGroupExists("aws_db_parameter_group.bar", &v), + testAccCheckAWSDBParameterGroupExists(resourceName, &v), testAccCheckAWSDBParameterGroupAttributes(&v, groupName), resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "name", groupName), + resourceName, "name", groupName), resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "family", "mysql5.6"), + resourceName, "family", "mysql5.6"), resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "description", "Test parameter group for terraform"), + resourceName, "description", "Test parameter group for terraform"), resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "parameter.1706463059.name", "collation_connection"), + resourceName, "parameter.1706463059.name", "collation_connection"), resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "parameter.1706463059.value", "utf8_unicode_ci"), + resourceName, "parameter.1706463059.value", "utf8_unicode_ci"), resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "parameter.1708034931.name", "character_set_results"), + resourceName, "parameter.1708034931.name", "character_set_results"), resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "parameter.1708034931.value", "utf8"), + resourceName, 
"parameter.1708034931.value", "utf8"), resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "parameter.2421266705.name", "character_set_server"), + resourceName, "parameter.2421266705.name", "character_set_server"), resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "parameter.2421266705.value", "utf8"), + resourceName, "parameter.2421266705.value", "utf8"), resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "parameter.2475805061.name", "collation_server"), + resourceName, "parameter.2475805061.name", "collation_server"), resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "parameter.2475805061.value", "utf8_unicode_ci"), + resourceName, "parameter.2475805061.value", "utf8_unicode_ci"), resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "parameter.2478663599.name", "character_set_client"), + resourceName, "parameter.2478663599.name", "character_set_client"), resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "parameter.2478663599.value", "utf8"), + resourceName, "parameter.2478663599.value", "utf8"), resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "tags.%", "2"), + resourceName, "tags.%", "2"), resource.TestMatchResourceAttr( - "aws_db_parameter_group.bar", "arn", regexp.MustCompile(fmt.Sprintf("^arn:[^:]+:rds:[^:]+:\\d{12}:pg:%s", groupName))), + resourceName, "arn", regexp.MustCompile(fmt.Sprintf("^arn:[^:]+:rds:[^:]+:\\d{12}:pg:%s", groupName))), + ), + }, + }, + }) +} + +func TestAccAWSDBParameterGroup_limit(t *testing.T) { + var v rds.DBParameterGroup + resourceName := "aws_db_parameter_group.test" + groupName := fmt.Sprintf("parameter-group-test-terraform-%d", acctest.RandInt()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSDBParameterGroupDestroy, + Steps: []resource.TestStep{ + { + Config: createAwsDbParameterGroupsExceedDefaultAwsLimit(groupName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSDBParameterGroupExists(resourceName, &v), + testAccCheckAWSDBParameterGroupAttributes(&v, groupName), + resource.TestCheckResourceAttr(resourceName, "name", groupName), + resource.TestCheckResourceAttr(resourceName, "family", "mysql5.6"), + resource.TestCheckResourceAttr(resourceName, "description", "RDS default parameter group: Exceed default AWS parameter group limit of twenty"), + + resource.TestCheckResourceAttr(resourceName, "parameter.2421266705.name", "character_set_server"), + resource.TestCheckResourceAttr(resourceName, "parameter.2421266705.value", "utf8"), + resource.TestCheckResourceAttr(resourceName, "parameter.2478663599.name", "character_set_client"), + resource.TestCheckResourceAttr(resourceName, "parameter.2478663599.value", "utf8"), + resource.TestCheckResourceAttr(resourceName, "parameter.1680942586.name", "collation_server"), + resource.TestCheckResourceAttr(resourceName, "parameter.1680942586.value", "utf8_general_ci"), + resource.TestCheckResourceAttr(resourceName, "parameter.2450940716.name", "collation_connection"), + resource.TestCheckResourceAttr(resourceName, "parameter.2450940716.value", "utf8_general_ci"), + resource.TestCheckResourceAttr(resourceName, "parameter.242489837.name", "join_buffer_size"), + resource.TestCheckResourceAttr(resourceName, "parameter.242489837.value", "16777216"), + resource.TestCheckResourceAttr(resourceName, "parameter.2026669454.name", "key_buffer_size"), + resource.TestCheckResourceAttr(resourceName, "parameter.2026669454.value", 
"67108864"), + resource.TestCheckResourceAttr(resourceName, "parameter.2705275319.name", "max_connections"), + resource.TestCheckResourceAttr(resourceName, "parameter.2705275319.value", "3200"), + resource.TestCheckResourceAttr(resourceName, "parameter.3512697936.name", "max_heap_table_size"), + resource.TestCheckResourceAttr(resourceName, "parameter.3512697936.value", "67108864"), + resource.TestCheckResourceAttr(resourceName, "parameter.780730667.name", "performance_schema"), + resource.TestCheckResourceAttr(resourceName, "parameter.780730667.value", "1"), + resource.TestCheckResourceAttr(resourceName, "parameter.2020346918.name", "performance_schema_users_size"), + resource.TestCheckResourceAttr(resourceName, "parameter.2020346918.value", "1048576"), + resource.TestCheckResourceAttr(resourceName, "parameter.1460834103.name", "query_cache_limit"), + resource.TestCheckResourceAttr(resourceName, "parameter.1460834103.value", "2097152"), + resource.TestCheckResourceAttr(resourceName, "parameter.484865451.name", "query_cache_size"), + resource.TestCheckResourceAttr(resourceName, "parameter.484865451.value", "67108864"), + resource.TestCheckResourceAttr(resourceName, "parameter.255276438.name", "sort_buffer_size"), + resource.TestCheckResourceAttr(resourceName, "parameter.255276438.value", "16777216"), + resource.TestCheckResourceAttr(resourceName, "parameter.2981725119.name", "table_open_cache"), + resource.TestCheckResourceAttr(resourceName, "parameter.2981725119.value", "4096"), + resource.TestCheckResourceAttr(resourceName, "parameter.2703661820.name", "tmp_table_size"), + resource.TestCheckResourceAttr(resourceName, "parameter.2703661820.value", "67108864"), + resource.TestCheckResourceAttr(resourceName, "parameter.2386583229.name", "binlog_cache_size"), + resource.TestCheckResourceAttr(resourceName, "parameter.2386583229.value", "131072"), + resource.TestCheckResourceAttr(resourceName, "parameter.4012389720.name", "innodb_flush_log_at_trx_commit"), + resource.TestCheckResourceAttr(resourceName, "parameter.4012389720.value", "0"), + resource.TestCheckResourceAttr(resourceName, "parameter.2688783017.name", "innodb_open_files"), + resource.TestCheckResourceAttr(resourceName, "parameter.2688783017.value", "4000"), + resource.TestCheckResourceAttr(resourceName, "parameter.782983977.name", "innodb_read_io_threads"), + resource.TestCheckResourceAttr(resourceName, "parameter.782983977.value", "64"), + resource.TestCheckResourceAttr(resourceName, "parameter.2809980413.name", "innodb_thread_concurrency"), + resource.TestCheckResourceAttr(resourceName, "parameter.2809980413.value", "0"), + resource.TestCheckResourceAttr(resourceName, "parameter.3599115250.name", "innodb_write_io_threads"), + resource.TestCheckResourceAttr(resourceName, "parameter.3599115250.value", "64"), + resource.TestCheckResourceAttr(resourceName, "parameter.2557156277.name", "character_set_connection"), + resource.TestCheckResourceAttr(resourceName, "parameter.2557156277.value", "utf8"), + resource.TestCheckResourceAttr(resourceName, "parameter.2475346812.name", "character_set_database"), + resource.TestCheckResourceAttr(resourceName, "parameter.2475346812.value", "utf8"), + resource.TestCheckResourceAttr(resourceName, "parameter.1986528518.name", "character_set_filesystem"), + resource.TestCheckResourceAttr(resourceName, "parameter.1986528518.value", "utf8"), + resource.TestCheckResourceAttr(resourceName, "parameter.1708034931.name", "character_set_results"), + resource.TestCheckResourceAttr(resourceName, 
"parameter.1708034931.value", "utf8"), + resource.TestCheckResourceAttr(resourceName, "parameter.1937131004.name", "event_scheduler"), + resource.TestCheckResourceAttr(resourceName, "parameter.1937131004.value", "ON"), + resource.TestCheckResourceAttr(resourceName, "parameter.3437079877.name", "innodb_buffer_pool_dump_at_shutdown"), + resource.TestCheckResourceAttr(resourceName, "parameter.3437079877.value", "1"), + resource.TestCheckResourceAttr(resourceName, "parameter.1092112861.name", "innodb_file_format"), + resource.TestCheckResourceAttr(resourceName, "parameter.1092112861.value", "barracuda"), + resource.TestCheckResourceAttr(resourceName, "parameter.615571931.name", "innodb_io_capacity"), + resource.TestCheckResourceAttr(resourceName, "parameter.615571931.value", "2000"), + resource.TestCheckResourceAttr(resourceName, "parameter.1065962799.name", "innodb_io_capacity_max"), + resource.TestCheckResourceAttr(resourceName, "parameter.1065962799.value", "3000"), + resource.TestCheckResourceAttr(resourceName, "parameter.1411161182.name", "innodb_lock_wait_timeout"), + resource.TestCheckResourceAttr(resourceName, "parameter.1411161182.value", "120"), + resource.TestCheckResourceAttr(resourceName, "parameter.3133315879.name", "innodb_max_dirty_pages_pct"), + resource.TestCheckResourceAttr(resourceName, "parameter.3133315879.value", "90"), + resource.TestCheckResourceAttr(resourceName, "parameter.950177639.name", "log_bin_trust_function_creators"), + resource.TestCheckResourceAttr(resourceName, "parameter.950177639.value", "1"), + resource.TestCheckResourceAttr(resourceName, "parameter.591700516.name", "log_warnings"), + resource.TestCheckResourceAttr(resourceName, "parameter.591700516.value", "2"), + resource.TestCheckResourceAttr(resourceName, "parameter.1918306725.name", "log_output"), + resource.TestCheckResourceAttr(resourceName, "parameter.1918306725.value", "FILE"), + resource.TestCheckResourceAttr(resourceName, "parameter.386204433.name", "max_allowed_packet"), + resource.TestCheckResourceAttr(resourceName, "parameter.386204433.value", "1073741824"), + resource.TestCheckResourceAttr(resourceName, "parameter.1700901269.name", "max_connect_errors"), + resource.TestCheckResourceAttr(resourceName, "parameter.1700901269.value", "100"), + resource.TestCheckResourceAttr(resourceName, "parameter.2839701698.name", "query_cache_min_res_unit"), + resource.TestCheckResourceAttr(resourceName, "parameter.2839701698.value", "512"), + resource.TestCheckResourceAttr(resourceName, "parameter.427634017.name", "slow_query_log"), + resource.TestCheckResourceAttr(resourceName, "parameter.427634017.value", "1"), + resource.TestCheckResourceAttr(resourceName, "parameter.881816039.name", "sync_binlog"), + resource.TestCheckResourceAttr(resourceName, "parameter.881816039.value", "0"), + resource.TestCheckResourceAttr(resourceName, "parameter.748684209.name", "tx_isolation"), + resource.TestCheckResourceAttr(resourceName, "parameter.748684209.value", "REPEATABLE-READ"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: updateAwsDbParameterGroupsExceedDefaultAwsLimit(groupName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSDBParameterGroupExists(resourceName, &v), + testAccCheckAWSDBParameterGroupAttributes(&v, groupName), + resource.TestCheckResourceAttr(resourceName, "name", groupName), + resource.TestCheckResourceAttr(resourceName, "family", "mysql5.6"), + resource.TestCheckResourceAttr(resourceName, "description", "Updated RDS default 
parameter group: Exceed default AWS parameter group limit of twenty"), + + resource.TestCheckResourceAttr(resourceName, "parameter.2421266705.name", "character_set_server"), + resource.TestCheckResourceAttr(resourceName, "parameter.2421266705.value", "utf8"), + resource.TestCheckResourceAttr(resourceName, "parameter.2478663599.name", "character_set_client"), + resource.TestCheckResourceAttr(resourceName, "parameter.2478663599.value", "utf8"), + resource.TestCheckResourceAttr(resourceName, "parameter.1680942586.name", "collation_server"), + resource.TestCheckResourceAttr(resourceName, "parameter.1680942586.value", "utf8_general_ci"), + resource.TestCheckResourceAttr(resourceName, "parameter.2450940716.name", "collation_connection"), + resource.TestCheckResourceAttr(resourceName, "parameter.2450940716.value", "utf8_general_ci"), + resource.TestCheckResourceAttr(resourceName, "parameter.242489837.name", "join_buffer_size"), + resource.TestCheckResourceAttr(resourceName, "parameter.242489837.value", "16777216"), + resource.TestCheckResourceAttr(resourceName, "parameter.2026669454.name", "key_buffer_size"), + resource.TestCheckResourceAttr(resourceName, "parameter.2026669454.value", "67108864"), + resource.TestCheckResourceAttr(resourceName, "parameter.2705275319.name", "max_connections"), + resource.TestCheckResourceAttr(resourceName, "parameter.2705275319.value", "3200"), + resource.TestCheckResourceAttr(resourceName, "parameter.3512697936.name", "max_heap_table_size"), + resource.TestCheckResourceAttr(resourceName, "parameter.3512697936.value", "67108864"), + resource.TestCheckResourceAttr(resourceName, "parameter.780730667.name", "performance_schema"), + resource.TestCheckResourceAttr(resourceName, "parameter.780730667.value", "1"), + resource.TestCheckResourceAttr(resourceName, "parameter.2020346918.name", "performance_schema_users_size"), + resource.TestCheckResourceAttr(resourceName, "parameter.2020346918.value", "1048576"), + resource.TestCheckResourceAttr(resourceName, "parameter.1460834103.name", "query_cache_limit"), + resource.TestCheckResourceAttr(resourceName, "parameter.1460834103.value", "2097152"), + resource.TestCheckResourceAttr(resourceName, "parameter.484865451.name", "query_cache_size"), + resource.TestCheckResourceAttr(resourceName, "parameter.484865451.value", "67108864"), + resource.TestCheckResourceAttr(resourceName, "parameter.255276438.name", "sort_buffer_size"), + resource.TestCheckResourceAttr(resourceName, "parameter.255276438.value", "16777216"), + resource.TestCheckResourceAttr(resourceName, "parameter.2981725119.name", "table_open_cache"), + resource.TestCheckResourceAttr(resourceName, "parameter.2981725119.value", "4096"), + resource.TestCheckResourceAttr(resourceName, "parameter.2703661820.name", "tmp_table_size"), + resource.TestCheckResourceAttr(resourceName, "parameter.2703661820.value", "67108864"), + resource.TestCheckResourceAttr(resourceName, "parameter.2386583229.name", "binlog_cache_size"), + resource.TestCheckResourceAttr(resourceName, "parameter.2386583229.value", "131072"), + resource.TestCheckResourceAttr(resourceName, "parameter.4012389720.name", "innodb_flush_log_at_trx_commit"), + resource.TestCheckResourceAttr(resourceName, "parameter.4012389720.value", "0"), + resource.TestCheckResourceAttr(resourceName, "parameter.2688783017.name", "innodb_open_files"), + resource.TestCheckResourceAttr(resourceName, "parameter.2688783017.value", "4000"), + resource.TestCheckResourceAttr(resourceName, "parameter.782983977.name", "innodb_read_io_threads"), + 
resource.TestCheckResourceAttr(resourceName, "parameter.782983977.value", "64"), + resource.TestCheckResourceAttr(resourceName, "parameter.2809980413.name", "innodb_thread_concurrency"), + resource.TestCheckResourceAttr(resourceName, "parameter.2809980413.value", "0"), + resource.TestCheckResourceAttr(resourceName, "parameter.3599115250.name", "innodb_write_io_threads"), + resource.TestCheckResourceAttr(resourceName, "parameter.3599115250.value", "64"), + resource.TestCheckResourceAttr(resourceName, "parameter.2557156277.name", "character_set_connection"), + resource.TestCheckResourceAttr(resourceName, "parameter.2557156277.value", "utf8"), + resource.TestCheckResourceAttr(resourceName, "parameter.2475346812.name", "character_set_database"), + resource.TestCheckResourceAttr(resourceName, "parameter.2475346812.value", "utf8"), + resource.TestCheckResourceAttr(resourceName, "parameter.1986528518.name", "character_set_filesystem"), + resource.TestCheckResourceAttr(resourceName, "parameter.1986528518.value", "utf8"), + resource.TestCheckResourceAttr(resourceName, "parameter.1708034931.name", "character_set_results"), + resource.TestCheckResourceAttr(resourceName, "parameter.1708034931.value", "utf8"), + resource.TestCheckResourceAttr(resourceName, "parameter.1937131004.name", "event_scheduler"), + resource.TestCheckResourceAttr(resourceName, "parameter.1937131004.value", "ON"), + resource.TestCheckResourceAttr(resourceName, "parameter.3437079877.name", "innodb_buffer_pool_dump_at_shutdown"), + resource.TestCheckResourceAttr(resourceName, "parameter.3437079877.value", "1"), + resource.TestCheckResourceAttr(resourceName, "parameter.1092112861.name", "innodb_file_format"), + resource.TestCheckResourceAttr(resourceName, "parameter.1092112861.value", "barracuda"), + resource.TestCheckResourceAttr(resourceName, "parameter.615571931.name", "innodb_io_capacity"), + resource.TestCheckResourceAttr(resourceName, "parameter.615571931.value", "2000"), + resource.TestCheckResourceAttr(resourceName, "parameter.1065962799.name", "innodb_io_capacity_max"), + resource.TestCheckResourceAttr(resourceName, "parameter.1065962799.value", "3000"), + resource.TestCheckResourceAttr(resourceName, "parameter.1411161182.name", "innodb_lock_wait_timeout"), + resource.TestCheckResourceAttr(resourceName, "parameter.1411161182.value", "120"), + resource.TestCheckResourceAttr(resourceName, "parameter.3133315879.name", "innodb_max_dirty_pages_pct"), + resource.TestCheckResourceAttr(resourceName, "parameter.3133315879.value", "90"), + resource.TestCheckResourceAttr(resourceName, "parameter.950177639.name", "log_bin_trust_function_creators"), + resource.TestCheckResourceAttr(resourceName, "parameter.950177639.value", "1"), + resource.TestCheckResourceAttr(resourceName, "parameter.591700516.name", "log_warnings"), + resource.TestCheckResourceAttr(resourceName, "parameter.591700516.value", "2"), + resource.TestCheckResourceAttr(resourceName, "parameter.1918306725.name", "log_output"), + resource.TestCheckResourceAttr(resourceName, "parameter.1918306725.value", "FILE"), + resource.TestCheckResourceAttr(resourceName, "parameter.386204433.name", "max_allowed_packet"), + resource.TestCheckResourceAttr(resourceName, "parameter.386204433.value", "1073741824"), + resource.TestCheckResourceAttr(resourceName, "parameter.1700901269.name", "max_connect_errors"), + resource.TestCheckResourceAttr(resourceName, "parameter.1700901269.value", "100"), + resource.TestCheckResourceAttr(resourceName, "parameter.2839701698.name", 
"query_cache_min_res_unit"), + resource.TestCheckResourceAttr(resourceName, "parameter.2839701698.value", "512"), + resource.TestCheckResourceAttr(resourceName, "parameter.427634017.name", "slow_query_log"), + resource.TestCheckResourceAttr(resourceName, "parameter.427634017.value", "1"), + resource.TestCheckResourceAttr(resourceName, "parameter.881816039.name", "sync_binlog"), + resource.TestCheckResourceAttr(resourceName, "parameter.881816039.value", "0"), + resource.TestCheckResourceAttr(resourceName, "parameter.748684209.name", "tx_isolation"), + resource.TestCheckResourceAttr(resourceName, "parameter.748684209.value", "REPEATABLE-READ"), ), }, }, @@ -377,7 +365,9 @@ func TestAccAWSDBParameterGroup_basic(t *testing.T) { func TestAccAWSDBParameterGroup_Disappears(t *testing.T) { var v rds.DBParameterGroup + resourceName := "aws_db_parameter_group.test" groupName := fmt.Sprintf("parameter-group-test-terraform-%d", acctest.RandInt()) + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -386,7 +376,7 @@ func TestAccAWSDBParameterGroup_Disappears(t *testing.T) { { Config: testAccAWSDBParameterGroupConfig(groupName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBParameterGroupExists("aws_db_parameter_group.bar", &v), + testAccCheckAWSDBParameterGroupExists(resourceName, &v), testAccCheckAWSDbParamaterGroupDisappears(&v), ), ExpectNonEmptyPlan: true, @@ -435,7 +425,7 @@ func TestAccAWSDBParameterGroup_generatedName(t *testing.T) { func TestAccAWSDBParameterGroup_withApplyMethod(t *testing.T) { var v rds.DBParameterGroup - + resourceName := "aws_db_parameter_group.test" groupName := fmt.Sprintf("parameter-group-test-terraform-%d", acctest.RandInt()) resource.ParallelTest(t, resource.TestCase{ @@ -446,36 +436,42 @@ func TestAccAWSDBParameterGroup_withApplyMethod(t *testing.T) { { Config: testAccAWSDBParameterGroupConfigWithApplyMethod(groupName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBParameterGroupExists("aws_db_parameter_group.bar", &v), + testAccCheckAWSDBParameterGroupExists(resourceName, &v), testAccCheckAWSDBParameterGroupAttributes(&v, groupName), resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "name", groupName), + resourceName, "name", groupName), resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "family", "mysql5.6"), + resourceName, "family", "mysql5.6"), resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "description", "Managed by Terraform"), + resourceName, "description", "Managed by Terraform"), resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "parameter.2421266705.name", "character_set_server"), + resourceName, "parameter.2421266705.name", "character_set_server"), resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "parameter.2421266705.value", "utf8"), + resourceName, "parameter.2421266705.value", "utf8"), resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "parameter.2421266705.apply_method", "immediate"), + resourceName, "parameter.2421266705.apply_method", "immediate"), resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "parameter.2478663599.name", "character_set_client"), + resourceName, "parameter.2478663599.name", "character_set_client"), resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "parameter.2478663599.value", "utf8"), + resourceName, "parameter.2478663599.value", "utf8"), resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", 
"parameter.2478663599.apply_method", "pending-reboot"), + resourceName, "parameter.2478663599.apply_method", "pending-reboot"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } func TestAccAWSDBParameterGroup_Only(t *testing.T) { var v rds.DBParameterGroup - + resourceName := "aws_db_parameter_group.test" groupName := fmt.Sprintf("parameter-group-test-terraform-%d", acctest.RandInt()) + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -484,22 +480,28 @@ func TestAccAWSDBParameterGroup_Only(t *testing.T) { { Config: testAccAWSDBParameterGroupOnlyConfig(groupName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBParameterGroupExists("aws_db_parameter_group.bar", &v), + testAccCheckAWSDBParameterGroupExists(resourceName, &v), testAccCheckAWSDBParameterGroupAttributes(&v, groupName), resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "name", groupName), + resourceName, "name", groupName), resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "family", "mysql5.6"), + resourceName, "family", "mysql5.6"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } func TestAccAWSDBParameterGroup_MatchDefault(t *testing.T) { var v rds.DBParameterGroup - + resourceName := "aws_db_parameter_group.test" groupName := fmt.Sprintf("parameter-group-test-terraform-%d", acctest.RandInt()) + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -508,13 +510,19 @@ func TestAccAWSDBParameterGroup_MatchDefault(t *testing.T) { { Config: testAccAWSDBParameterGroupIncludeDefaultConfig(groupName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBParameterGroupExists("aws_db_parameter_group.bar", &v), + testAccCheckAWSDBParameterGroupExists(resourceName, &v), resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "name", groupName), + resourceName, "name", groupName), resource.TestCheckResourceAttr( - "aws_db_parameter_group.bar", "family", "postgres9.4"), + resourceName, "family", "postgres9.4"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"parameter"}, + }, }, }) } @@ -621,7 +629,7 @@ func testAccCheckAWSDBParameterGroupExists(n string, v *rds.DBParameterGroup) re func testAccAWSDBParameterGroupConfig(n string) string { return fmt.Sprintf(` -resource "aws_db_parameter_group" "bar" { +resource "aws_db_parameter_group" "test" { name = "%s" family = "mysql5.6" @@ -641,7 +649,7 @@ resource "aws_db_parameter_group" "bar" { } tags = { - foo = "bar" + foo = "test" } } `, n) @@ -649,7 +657,7 @@ resource "aws_db_parameter_group" "bar" { func testAccAWSDBParameterGroupConfigWithApplyMethod(n string) string { return fmt.Sprintf(` -resource "aws_db_parameter_group" "bar" { +resource "aws_db_parameter_group" "test" { name = "%s" family = "mysql5.6" @@ -665,7 +673,7 @@ resource "aws_db_parameter_group" "bar" { } tags = { - foo = "bar" + foo = "test" } } `, n) @@ -673,7 +681,7 @@ resource "aws_db_parameter_group" "bar" { func testAccAWSDBParameterGroupAddParametersConfig(n string) string { return fmt.Sprintf(` -resource "aws_db_parameter_group" "bar" { +resource "aws_db_parameter_group" "test" { name = "%s" family = "mysql5.6" description = "Test parameter group for terraform" @@ -704,7 +712,7 @@ resource "aws_db_parameter_group" "bar" { } tags = { - foo = "bar" + foo = "test" baz 
= "foo" } } @@ -713,7 +721,7 @@ resource "aws_db_parameter_group" "bar" { func testAccAWSDBParameterGroupOnlyConfig(n string) string { return fmt.Sprintf(` -resource "aws_db_parameter_group" "bar" { +resource "aws_db_parameter_group" "test" { name = "%s" family = "mysql5.6" description = "Test parameter group for terraform" @@ -723,7 +731,7 @@ resource "aws_db_parameter_group" "bar" { func createAwsDbParameterGroupsExceedDefaultAwsLimit(n string) string { return fmt.Sprintf(` -resource "aws_db_parameter_group" "large" { +resource "aws_db_parameter_group" "test" { name = "%s" family = "mysql5.6" description = "RDS default parameter group: Exceed default AWS parameter group limit of twenty" @@ -785,7 +793,7 @@ resource "aws_db_parameter_group" "large" { parameter { name = "innodb_file_format" - value = "Barracuda" + value = "barracuda" } parameter { @@ -943,7 +951,7 @@ resource "aws_db_parameter_group" "large" { func updateAwsDbParameterGroupsExceedDefaultAwsLimit(n string) string { return fmt.Sprintf(` -resource "aws_db_parameter_group" "large" { +resource "aws_db_parameter_group" "test" { name = "%s" family = "mysql5.6" description = "Updated RDS default parameter group: Exceed default AWS parameter group limit of twenty" @@ -1005,7 +1013,7 @@ resource "aws_db_parameter_group" "large" { parameter { name = "innodb_file_format" - value = "Barracuda" + value = "barracuda" } parameter { @@ -1163,7 +1171,7 @@ resource "aws_db_parameter_group" "large" { func testAccAWSDBParameterGroupIncludeDefaultConfig(n string) string { return fmt.Sprintf(` -resource "aws_db_parameter_group" "bar" { +resource "aws_db_parameter_group" "test" { name = "%s" family = "postgres9.4" diff --git a/aws/resource_aws_db_security_group_test.go b/aws/resource_aws_db_security_group_test.go index 8e21211668a..8010ed9b707 100644 --- a/aws/resource_aws_db_security_group_test.go +++ b/aws/resource_aws_db_security_group_test.go @@ -14,39 +14,13 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/terraform" ) -func TestAccAWSDBSecurityGroup_importBasic(t *testing.T) { - oldvar := os.Getenv("AWS_DEFAULT_REGION") - os.Setenv("AWS_DEFAULT_REGION", "us-east-1") - defer os.Setenv("AWS_DEFAULT_REGION", oldvar) - - rName := fmt.Sprintf("tf-acc-%s", acctest.RandString(5)) - resourceName := "aws_db_security_group.bar" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDBSecurityGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSDBSecurityGroupConfig(rName), - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - func TestAccAWSDBSecurityGroup_basic(t *testing.T) { var v rds.DBSecurityGroup oldvar := os.Getenv("AWS_DEFAULT_REGION") os.Setenv("AWS_DEFAULT_REGION", "us-east-1") defer os.Setenv("AWS_DEFAULT_REGION", oldvar) - + resourceName := "aws_db_security_group.test" rName := fmt.Sprintf("tf-acc-%s", acctest.RandString(5)) resource.ParallelTest(t, resource.TestCase{ @@ -57,21 +31,26 @@ func TestAccAWSDBSecurityGroup_basic(t *testing.T) { { Config: testAccAWSDBSecurityGroupConfig(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDBSecurityGroupExists("aws_db_security_group.bar", &v), + testAccCheckAWSDBSecurityGroupExists(resourceName, &v), testAccCheckAWSDBSecurityGroupAttributes(&v), - resource.TestMatchResourceAttr("aws_db_security_group.bar", "arn", regexp.MustCompile(`^arn:[^:]+:rds:[^:]+:\d{12}:secgrp:.+`)), + 
resource.TestMatchResourceAttr(resourceName, "arn", regexp.MustCompile(`^arn:[^:]+:rds:[^:]+:\d{12}:secgrp:.+`)), resource.TestCheckResourceAttr( - "aws_db_security_group.bar", "name", rName), + resourceName, "name", rName), resource.TestCheckResourceAttr( - "aws_db_security_group.bar", "description", "Managed by Terraform"), + resourceName, "description", "Managed by Terraform"), resource.TestCheckResourceAttr( - "aws_db_security_group.bar", "ingress.3363517775.cidr", "10.0.0.1/24"), + resourceName, "ingress.3363517775.cidr", "10.0.0.1/24"), resource.TestCheckResourceAttr( - "aws_db_security_group.bar", "ingress.#", "1"), + resourceName, "ingress.#", "1"), resource.TestCheckResourceAttr( - "aws_db_security_group.bar", "tags.%", "1"), + resourceName, "tags.%", "1"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -169,7 +148,7 @@ func testAccCheckAWSDBSecurityGroupExists(n string, v *rds.DBSecurityGroup) reso func testAccAWSDBSecurityGroupConfig(name string) string { return fmt.Sprintf(` -resource "aws_db_security_group" "bar" { +resource "aws_db_security_group" "test" { name = "%s" ingress { @@ -177,7 +156,7 @@ resource "aws_db_security_group" "bar" { } tags = { - foo = "bar" + foo = "test" } } `, name) diff --git a/aws/resource_aws_db_subnet_group_test.go b/aws/resource_aws_db_subnet_group_test.go index 94d709f7c44..df10aa76dc6 100644 --- a/aws/resource_aws_db_subnet_group_test.go +++ b/aws/resource_aws_db_subnet_group_test.go @@ -65,37 +65,13 @@ func testSweepRdsDbSubnetGroups(region string) error { return nil } -func TestAccAWSDBSubnetGroup_importBasic(t *testing.T) { - resourceName := "aws_db_subnet_group.foo" - - rName := fmt.Sprintf("tf-test-%d", acctest.RandInt()) - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckDBSubnetGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccDBSubnetGroupConfig(rName), - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{ - "description"}, - }, - }, - }) -} - func TestAccAWSDBSubnetGroup_basic(t *testing.T) { var v rds.DBSubnetGroup testCheck := func(*terraform.State) error { return nil } - + resourceName := "aws_db_subnet_group.test" rName := fmt.Sprintf("tf-test-%d", acctest.RandInt()) resource.ParallelTest(t, resource.TestCase{ @@ -107,22 +83,30 @@ func TestAccAWSDBSubnetGroup_basic(t *testing.T) { Config: testAccDBSubnetGroupConfig(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDBSubnetGroupExists( - "aws_db_subnet_group.foo", &v), + resourceName, &v), resource.TestCheckResourceAttr( - "aws_db_subnet_group.foo", "name", rName), + resourceName, "name", rName), resource.TestCheckResourceAttr( - "aws_db_subnet_group.foo", "description", "Managed by Terraform"), + resourceName, "description", "Managed by Terraform"), resource.TestMatchResourceAttr( - "aws_db_subnet_group.foo", "arn", regexp.MustCompile(fmt.Sprintf("^arn:[^:]+:rds:[^:]+:\\d{12}:subgrp:%s", rName))), + resourceName, "arn", regexp.MustCompile(fmt.Sprintf("^arn:[^:]+:rds:[^:]+:\\d{12}:subgrp:%s", rName))), testCheck, ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "description"}, + }, }, }) } func TestAccAWSDBSubnetGroup_namePrefix(t *testing.T) { var v rds.DBSubnetGroup + resourceName := "aws_db_subnet_group.test" resource.ParallelTest(t, resource.TestCase{ 
PreCheck: func() { testAccPreCheck(t) }, @@ -133,9 +117,9 @@ func TestAccAWSDBSubnetGroup_namePrefix(t *testing.T) { Config: testAccDBSubnetGroupConfig_namePrefix, Check: resource.ComposeTestCheckFunc( testAccCheckDBSubnetGroupExists( - "aws_db_subnet_group.test", &v), + resourceName, &v), resource.TestMatchResourceAttr( - "aws_db_subnet_group.test", "name", regexp.MustCompile("^tf_test-")), + resourceName, "name", regexp.MustCompile("^tf_test-")), ), }, }, @@ -144,6 +128,7 @@ func TestAccAWSDBSubnetGroup_namePrefix(t *testing.T) { func TestAccAWSDBSubnetGroup_generatedName(t *testing.T) { var v rds.DBSubnetGroup + resourceName := "aws_db_subnet_group.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -154,7 +139,7 @@ func TestAccAWSDBSubnetGroup_generatedName(t *testing.T) { Config: testAccDBSubnetGroupConfig_generatedName, Check: resource.ComposeTestCheckFunc( testAccCheckDBSubnetGroupExists( - "aws_db_subnet_group.test", &v), + resourceName, &v), ), }, }, @@ -169,6 +154,7 @@ func TestAccAWSDBSubnetGroup_withUndocumentedCharacters(t *testing.T) { testCheck := func(*terraform.State) error { return nil } + resourceName := "aws_db_subnet_group.underscores" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -187,14 +173,22 @@ func TestAccAWSDBSubnetGroup_withUndocumentedCharacters(t *testing.T) { testCheck, ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "description"}, + }, }, }) } func TestAccAWSDBSubnetGroup_updateDescription(t *testing.T) { var v rds.DBSubnetGroup - + resourceName := "aws_db_subnet_group.test" rName := fmt.Sprintf("tf-test-%d", acctest.RandInt()) + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -204,19 +198,25 @@ func TestAccAWSDBSubnetGroup_updateDescription(t *testing.T) { Config: testAccDBSubnetGroupConfig(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDBSubnetGroupExists( - "aws_db_subnet_group.foo", &v), + resourceName, &v), resource.TestCheckResourceAttr( - "aws_db_subnet_group.foo", "description", "Managed by Terraform"), + resourceName, "description", "Managed by Terraform"), ), }, - + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "description"}, + }, { Config: testAccDBSubnetGroupConfig_updatedDescription(rName), Check: resource.ComposeTestCheckFunc( testAccCheckDBSubnetGroupExists( - "aws_db_subnet_group.foo", &v), + resourceName, &v), resource.TestCheckResourceAttr( - "aws_db_subnet_group.foo", "description", "foo description updated"), + resourceName, "description", "test description updated"), ), }, }, @@ -288,7 +288,7 @@ data "aws_availability_zones" "available" { state = "available" } -resource "aws_vpc" "foo" { +resource "aws_vpc" "test" { cidr_block = "10.1.0.0/16" tags = { @@ -296,10 +296,10 @@ resource "aws_vpc" "foo" { } } -resource "aws_subnet" "foo" { +resource "aws_subnet" "test" { cidr_block = "10.1.1.0/24" availability_zone = "${data.aws_availability_zones.available.names[0]}" - vpc_id = "${aws_vpc.foo.id}" + vpc_id = "${aws_vpc.test.id}" tags = { Name = "tf-acc-db-subnet-group-1" @@ -309,16 +309,16 @@ resource "aws_subnet" "foo" { resource "aws_subnet" "bar" { cidr_block = "10.1.2.0/24" availability_zone = "${data.aws_availability_zones.available.names[1]}" - vpc_id = "${aws_vpc.foo.id}" + vpc_id = "${aws_vpc.test.id}" tags = { Name = 
"tf-acc-db-subnet-group-2" } } -resource "aws_db_subnet_group" "foo" { +resource "aws_db_subnet_group" "test" { name = "%s" - subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.bar.id}"] + subnet_ids = ["${aws_subnet.test.id}", "${aws_subnet.bar.id}"] tags = { Name = "tf-dbsubnet-group-test" @@ -333,7 +333,7 @@ data "aws_availability_zones" "available" { state = "available" } -resource "aws_vpc" "foo" { +resource "aws_vpc" "test" { cidr_block = "10.1.0.0/16" tags = { @@ -341,10 +341,10 @@ resource "aws_vpc" "foo" { } } -resource "aws_subnet" "foo" { +resource "aws_subnet" "test" { cidr_block = "10.1.1.0/24" availability_zone = "${data.aws_availability_zones.available.names[0]}" - vpc_id = "${aws_vpc.foo.id}" + vpc_id = "${aws_vpc.test.id}" tags = { Name = "tf-acc-db-subnet-group-1" @@ -354,17 +354,17 @@ resource "aws_subnet" "foo" { resource "aws_subnet" "bar" { cidr_block = "10.1.2.0/24" availability_zone = "${data.aws_availability_zones.available.names[1]}" - vpc_id = "${aws_vpc.foo.id}" + vpc_id = "${aws_vpc.test.id}" tags = { Name = "tf-acc-db-subnet-group-2" } } -resource "aws_db_subnet_group" "foo" { +resource "aws_db_subnet_group" "test" { name = "%s" - description = "foo description updated" - subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.bar.id}"] + description = "test description updated" + subnet_ids = ["${aws_subnet.test.id}", "${aws_subnet.bar.id}"] tags = { Name = "tf-dbsubnet-group-test" From a6183b78ffd45fb1e39a377abe2a5623094cdfd3 Mon Sep 17 00:00:00 2001 From: Ryn Daniels Date: Tue, 22 Oct 2019 11:38:15 +0200 Subject: [PATCH 13/55] Import test refactor for dax resources --- aws/resource_aws_dax_cluster_test.go | 105 ++++++++++--------- aws/resource_aws_dax_parameter_group_test.go | 37 ++----- 2 files changed, 66 insertions(+), 76 deletions(-) diff --git a/aws/resource_aws_dax_cluster_test.go b/aws/resource_aws_dax_cluster_test.go index 0b26b7aff47..eb86c61d625 100644 --- a/aws/resource_aws_dax_cluster_test.go +++ b/aws/resource_aws_dax_cluster_test.go @@ -58,32 +58,11 @@ func testSweepDAXClusters(region string) error { return nil } -func TestAccAWSDAXCluster_importBasic(t *testing.T) { - resourceName := "aws_dax_cluster.test" - rString := acctest.RandString(10) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSDax(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSDAXClusterDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSDAXClusterConfig(rString), - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - func TestAccAWSDAXCluster_basic(t *testing.T) { var dc dax.Cluster rString := acctest.RandString(10) iamRoleResourceName := "aws_iam_role.test" + resourceName := "aws_dax_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSDax(t) }, @@ -93,37 +72,42 @@ func TestAccAWSDAXCluster_basic(t *testing.T) { { Config: testAccAWSDAXClusterConfig(rString), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDAXClusterExists("aws_dax_cluster.test", &dc), - testAccMatchResourceAttrRegionalARN("aws_dax_cluster.test", "arn", "dax", regexp.MustCompile("cache/.+")), + testAccCheckAWSDAXClusterExists(resourceName, &dc), + testAccMatchResourceAttrRegionalARN(resourceName, "arn", "dax", regexp.MustCompile("cache/.+")), resource.TestMatchResourceAttr( - "aws_dax_cluster.test", "cluster_name", regexp.MustCompile(`^tf-\w+$`)), - 
resource.TestCheckResourceAttrPair("aws_dax_cluster.test", "iam_role_arn", iamRoleResourceName, "arn"), + resourceName, "cluster_name", regexp.MustCompile(`^tf-\w+$`)), + resource.TestCheckResourceAttrPair(resourceName, "iam_role_arn", iamRoleResourceName, "arn"), resource.TestCheckResourceAttr( - "aws_dax_cluster.test", "node_type", "dax.t2.small"), + resourceName, "node_type", "dax.t2.small"), resource.TestCheckResourceAttr( - "aws_dax_cluster.test", "replication_factor", "1"), + resourceName, "replication_factor", "1"), resource.TestCheckResourceAttr( - "aws_dax_cluster.test", "description", "test cluster"), + resourceName, "description", "test cluster"), resource.TestMatchResourceAttr( - "aws_dax_cluster.test", "parameter_group_name", regexp.MustCompile(`^default.dax`)), + resourceName, "parameter_group_name", regexp.MustCompile(`^default.dax`)), resource.TestMatchResourceAttr( - "aws_dax_cluster.test", "maintenance_window", regexp.MustCompile(`^\w{3}:\d{2}:\d{2}-\w{3}:\d{2}:\d{2}$`)), + resourceName, "maintenance_window", regexp.MustCompile(`^\w{3}:\d{2}:\d{2}-\w{3}:\d{2}:\d{2}$`)), resource.TestCheckResourceAttr( - "aws_dax_cluster.test", "subnet_group_name", "default"), + resourceName, "subnet_group_name", "default"), resource.TestMatchResourceAttr( - "aws_dax_cluster.test", "nodes.0.id", regexp.MustCompile(`^tf-[\w-]+$`)), + resourceName, "nodes.0.id", regexp.MustCompile(`^tf-[\w-]+$`)), resource.TestMatchResourceAttr( - "aws_dax_cluster.test", "configuration_endpoint", regexp.MustCompile(`:\d+$`)), + resourceName, "configuration_endpoint", regexp.MustCompile(`:\d+$`)), resource.TestCheckResourceAttrSet( - "aws_dax_cluster.test", "cluster_address"), + resourceName, "cluster_address"), resource.TestMatchResourceAttr( - "aws_dax_cluster.test", "port", regexp.MustCompile(`^\d+$`)), + resourceName, "port", regexp.MustCompile(`^\d+$`)), resource.TestCheckResourceAttr( - "aws_dax_cluster.test", "server_side_encryption.#", "1"), + resourceName, "server_side_encryption.#", "1"), resource.TestCheckResourceAttr( - "aws_dax_cluster.test", "server_side_encryption.0.enabled", "false"), + resourceName, "server_side_encryption.0.enabled", "false"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -131,6 +115,8 @@ func TestAccAWSDAXCluster_basic(t *testing.T) { func TestAccAWSDAXCluster_resize(t *testing.T) { var dc dax.Cluster rString := acctest.RandString(10) + resourceName := "aws_dax_cluster.test" + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSDax(t) }, Providers: testAccProviders, @@ -139,25 +125,30 @@ func TestAccAWSDAXCluster_resize(t *testing.T) { { Config: testAccAWSDAXClusterConfigResize_singleNode(rString), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDAXClusterExists("aws_dax_cluster.test", &dc), + testAccCheckAWSDAXClusterExists(resourceName, &dc), resource.TestCheckResourceAttr( - "aws_dax_cluster.test", "replication_factor", "1"), + resourceName, "replication_factor", "1"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSDAXClusterConfigResize_multiNode(rString), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDAXClusterExists("aws_dax_cluster.test", &dc), + testAccCheckAWSDAXClusterExists(resourceName, &dc), resource.TestCheckResourceAttr( - "aws_dax_cluster.test", "replication_factor", "2"), + resourceName, "replication_factor", "2"), ), }, { Config: 
testAccAWSDAXClusterConfigResize_singleNode(rString), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDAXClusterExists("aws_dax_cluster.test", &dc), + testAccCheckAWSDAXClusterExists(resourceName, &dc), resource.TestCheckResourceAttr( - "aws_dax_cluster.test", "replication_factor", "1"), + resourceName, "replication_factor", "1"), ), }, }, @@ -167,6 +158,8 @@ func TestAccAWSDAXCluster_resize(t *testing.T) { func TestAccAWSDAXCluster_encryption_disabled(t *testing.T) { var dc dax.Cluster rString := acctest.RandString(10) + resourceName := "aws_dax_cluster.test" + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSDax(t) }, Providers: testAccProviders, @@ -175,11 +168,16 @@ func TestAccAWSDAXCluster_encryption_disabled(t *testing.T) { { Config: testAccAWSDAXClusterConfigWithEncryption(rString, false), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDAXClusterExists("aws_dax_cluster.test", &dc), - resource.TestCheckResourceAttr("aws_dax_cluster.test", "server_side_encryption.#", "1"), - resource.TestCheckResourceAttr("aws_dax_cluster.test", "server_side_encryption.0.enabled", "false"), + testAccCheckAWSDAXClusterExists(resourceName, &dc), + resource.TestCheckResourceAttr(resourceName, "server_side_encryption.#", "1"), + resource.TestCheckResourceAttr(resourceName, "server_side_encryption.0.enabled", "false"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, // Ensure it shows no difference when removing server_side_encryption configuration { Config: testAccAWSDAXClusterConfig(rString), @@ -193,6 +191,8 @@ func TestAccAWSDAXCluster_encryption_disabled(t *testing.T) { func TestAccAWSDAXCluster_encryption_enabled(t *testing.T) { var dc dax.Cluster rString := acctest.RandString(10) + resourceName := "aws_dax_cluster.test" + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSDax(t) }, Providers: testAccProviders, @@ -201,11 +201,16 @@ func TestAccAWSDAXCluster_encryption_enabled(t *testing.T) { { Config: testAccAWSDAXClusterConfigWithEncryption(rString, true), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSDAXClusterExists("aws_dax_cluster.test", &dc), - resource.TestCheckResourceAttr("aws_dax_cluster.test", "server_side_encryption.#", "1"), - resource.TestCheckResourceAttr("aws_dax_cluster.test", "server_side_encryption.0.enabled", "true"), + testAccCheckAWSDAXClusterExists(resourceName, &dc), + resource.TestCheckResourceAttr(resourceName, "server_side_encryption.#", "1"), + resource.TestCheckResourceAttr(resourceName, "server_side_encryption.0.enabled", "true"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, // Ensure it shows a difference when removing server_side_encryption configuration { Config: testAccAWSDAXClusterConfig(rString), diff --git a/aws/resource_aws_dax_parameter_group_test.go b/aws/resource_aws_dax_parameter_group_test.go index a65aa5f75cb..5d6619bc573 100644 --- a/aws/resource_aws_dax_parameter_group_test.go +++ b/aws/resource_aws_dax_parameter_group_test.go @@ -12,31 +12,6 @@ import ( ) func TestAccAwsDaxParameterGroup_basic(t *testing.T) { - rName := acctest.RandomWithPrefix("tf-acc-test") - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSDax(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAwsDaxParameterGroupDestroy, - Steps: []resource.TestStep{ - { - Config: 
testAccDaxParameterGroupConfig(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsDaxParameterGroupExists("aws_dax_parameter_group.test"), - resource.TestCheckResourceAttr("aws_dax_parameter_group.test", "parameters.#", "2"), - ), - }, - { - Config: testAccDaxParameterGroupConfig_parameters(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsDaxParameterGroupExists("aws_dax_parameter_group.test"), - resource.TestCheckResourceAttr("aws_dax_parameter_group.test", "parameters.#", "2"), - ), - }, - }, - }) -} - -func TestAccAwsDaxParameterGroup_import(t *testing.T) { rName := acctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_dax_parameter_group.test" @@ -47,13 +22,23 @@ func TestAccAwsDaxParameterGroup_import(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccDaxParameterGroupConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsDaxParameterGroupExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "parameters.#", "2"), + ), }, - { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, }, + { + Config: testAccDaxParameterGroupConfig_parameters(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsDaxParameterGroupExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "parameters.#", "2"), + ), + }, }, }) } From 7c4131795941fea84053296d1c5bb00101116a78 Mon Sep 17 00:00:00 2001 From: Ryn Daniels Date: Tue, 22 Oct 2019 11:46:10 +0200 Subject: [PATCH 14/55] Import test refactor for customer gateway --- aws/resource_aws_customer_gateway_test.go | 71 ++++++++++------------- 1 file changed, 32 insertions(+), 39 deletions(-) diff --git a/aws/resource_aws_customer_gateway_test.go b/aws/resource_aws_customer_gateway_test.go index 6f7a47790d4..5956f57dfef 100644 --- a/aws/resource_aws_customer_gateway_test.go +++ b/aws/resource_aws_customer_gateway_test.go @@ -15,55 +15,39 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/terraform" ) -func TestAccAWSCustomerGateway_importBasic(t *testing.T) { - resourceName := "aws_customer_gateway.foo" - rInt := acctest.RandInt() - rBgpAsn := acctest.RandIntRange(64512, 65534) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCustomerGatewayDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCustomerGatewayConfig(rInt, rBgpAsn), - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - func TestAccAWSCustomerGateway_basic(t *testing.T) { var gateway ec2.CustomerGateway rBgpAsn := acctest.RandIntRange(64512, 65534) rInt := acctest.RandInt() + resourceName := "aws_customer_gateway.test" + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_customer_gateway.foo", + IDRefreshName: resourceName, Providers: testAccProviders, CheckDestroy: testAccCheckCustomerGatewayDestroy, Steps: []resource.TestStep{ { Config: testAccCustomerGatewayConfig(rInt, rBgpAsn), Check: resource.ComposeTestCheckFunc( - testAccCheckCustomerGateway("aws_customer_gateway.foo", &gateway), + testAccCheckCustomerGateway(resourceName, &gateway), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccCustomerGatewayConfigUpdateTags(rInt, rBgpAsn), Check: resource.ComposeTestCheckFunc( - testAccCheckCustomerGateway("aws_customer_gateway.foo", &gateway), + testAccCheckCustomerGateway(resourceName, &gateway), ), }, { 
Config: testAccCustomerGatewayConfigForceReplace(rInt, rBgpAsn), Check: resource.ComposeTestCheckFunc( - testAccCheckCustomerGateway("aws_customer_gateway.foo", &gateway), + testAccCheckCustomerGateway(resourceName, &gateway), ), }, }, @@ -74,18 +58,25 @@ func TestAccAWSCustomerGateway_similarAlreadyExists(t *testing.T) { var gateway ec2.CustomerGateway rInt := acctest.RandInt() rBgpAsn := acctest.RandIntRange(64512, 65534) + resourceName := "aws_customer_gateway.test" + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_customer_gateway.foo", + IDRefreshName: resourceName, Providers: testAccProviders, CheckDestroy: testAccCheckCustomerGatewayDestroy, Steps: []resource.TestStep{ { Config: testAccCustomerGatewayConfig(rInt, rBgpAsn), Check: resource.ComposeTestCheckFunc( - testAccCheckCustomerGateway("aws_customer_gateway.foo", &gateway), + testAccCheckCustomerGateway(resourceName, &gateway), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccCustomerGatewayConfigIdentical(rInt, rBgpAsn), ExpectError: regexp.MustCompile("An existing customer gateway"), @@ -98,6 +89,8 @@ func TestAccAWSCustomerGateway_disappears(t *testing.T) { rInt := acctest.RandInt() rBgpAsn := acctest.RandIntRange(64512, 65534) var gateway ec2.CustomerGateway + resourceName := "aws_customer_gateway.test" + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -106,7 +99,7 @@ func TestAccAWSCustomerGateway_disappears(t *testing.T) { { Config: testAccCustomerGatewayConfig(rInt, rBgpAsn), Check: resource.ComposeTestCheckFunc( - testAccCheckCustomerGateway("aws_customer_gateway.foo", &gateway), + testAccCheckCustomerGateway(resourceName, &gateway), testAccAWSCustomerGatewayDisappears(&gateway), ), ExpectNonEmptyPlan: true, @@ -222,13 +215,13 @@ func testAccCheckCustomerGateway(gatewayResource string, cgw *ec2.CustomerGatewa func testAccCustomerGatewayConfig(rInt, rBgpAsn int) string { return fmt.Sprintf(` -resource "aws_customer_gateway" "foo" { +resource "aws_customer_gateway" "test" { bgp_asn = %d ip_address = "172.0.0.1" type = "ipsec.1" tags = { - Name = "foo-gateway-%d" + Name = "test-gateway-%d" } } `, rBgpAsn, rInt) @@ -236,13 +229,13 @@ resource "aws_customer_gateway" "foo" { func testAccCustomerGatewayConfigIdentical(randInt, rBgpAsn int) string { return fmt.Sprintf(` -resource "aws_customer_gateway" "foo" { +resource "aws_customer_gateway" "test" { bgp_asn = %d ip_address = "172.0.0.1" type = "ipsec.1" tags = { - Name = "foo-gateway-%d" + Name = "test-gateway-%d" } } @@ -252,7 +245,7 @@ resource "aws_customer_gateway" "identical" { type = "ipsec.1" tags = { - Name = "foo-gateway-identical-%d" + Name = "test-gateway-identical-%d" } } `, rBgpAsn, randInt, rBgpAsn, randInt) @@ -261,13 +254,13 @@ resource "aws_customer_gateway" "identical" { // Add the Another: "tag" tag. func testAccCustomerGatewayConfigUpdateTags(rInt, rBgpAsn int) string { return fmt.Sprintf(` -resource "aws_customer_gateway" "foo" { +resource "aws_customer_gateway" "test" { bgp_asn = %d ip_address = "172.0.0.1" type = "ipsec.1" tags = { - Name = "foo-gateway-%d" + Name = "test-gateway-%d" Another = "tag" } } @@ -277,13 +270,13 @@ resource "aws_customer_gateway" "foo" { // Change the ip_address. 
func testAccCustomerGatewayConfigForceReplace(rInt, rBgpAsn int) string { return fmt.Sprintf(` -resource "aws_customer_gateway" "foo" { +resource "aws_customer_gateway" "test" { bgp_asn = %d ip_address = "172.10.10.1" type = "ipsec.1" tags = { - Name = "foo-gateway-%d" + Name = "test-gateway-%d" Another = "tag" } } From ebb35bbc979d24357c538ff4de76578fc79b4a94 Mon Sep 17 00:00:00 2001 From: Ryn Daniels Date: Tue, 22 Oct 2019 14:49:33 +0200 Subject: [PATCH 15/55] Import test refactor for cognito resources --- ...resource_aws_cognito_identity_pool_test.go | 190 ++--- aws/resource_aws_cognito_user_group_test.go | 67 +- ...ource_aws_cognito_user_pool_client_test.go | 238 ++++--- aws/resource_aws_cognito_user_pool_test.go | 655 ++++++++++-------- 4 files changed, 627 insertions(+), 523 deletions(-) diff --git a/aws/resource_aws_cognito_identity_pool_test.go b/aws/resource_aws_cognito_identity_pool_test.go index ca598b2e99b..2f08c4f0c99 100644 --- a/aws/resource_aws_cognito_identity_pool_test.go +++ b/aws/resource_aws_cognito_identity_pool_test.go @@ -14,31 +14,10 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/terraform" ) -func TestAccAWSCognitoIdentityPool_importBasic(t *testing.T) { - resourceName := "aws_cognito_identity_pool.main" - rName := acctest.RandString(10) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentity(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAPIGatewayAccountDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSCognitoIdentityPoolConfig_basic(rName), - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - func TestAccAWSCognitoIdentityPool_basic(t *testing.T) { name := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) updatedName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + resourceName := "aws_cognito_identity_pool.main" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentity(t) }, @@ -48,18 +27,23 @@ func TestAccAWSCognitoIdentityPool_basic(t *testing.T) { { Config: testAccAWSCognitoIdentityPoolConfig_basic(name), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSCognitoIdentityPoolExists("aws_cognito_identity_pool.main"), - resource.TestCheckResourceAttr("aws_cognito_identity_pool.main", "identity_pool_name", fmt.Sprintf("identity pool %s", name)), - resource.TestMatchResourceAttr("aws_cognito_identity_pool.main", "arn", + testAccCheckAWSCognitoIdentityPoolExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "identity_pool_name", fmt.Sprintf("identity pool %s", name)), + resource.TestMatchResourceAttr(resourceName, "arn", regexp.MustCompile("^arn:aws:cognito-identity:[^:]+:[0-9]{12}:identitypool/[^:]+:([0-9a-f]){8}-([0-9a-f]){4}-([0-9a-f]){4}-([0-9a-f]){4}-([0-9a-f]){12}$")), - resource.TestCheckResourceAttr("aws_cognito_identity_pool.main", "allow_unauthenticated_identities", "false"), + resource.TestCheckResourceAttr(resourceName, "allow_unauthenticated_identities", "false"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSCognitoIdentityPoolConfig_basic(updatedName), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSCognitoIdentityPoolExists("aws_cognito_identity_pool.main"), - resource.TestCheckResourceAttr("aws_cognito_identity_pool.main", "identity_pool_name", fmt.Sprintf("identity pool %s", updatedName)), 
+ testAccCheckAWSCognitoIdentityPoolExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "identity_pool_name", fmt.Sprintf("identity pool %s", updatedName)), ), }, }, @@ -68,6 +52,7 @@ func TestAccAWSCognitoIdentityPool_basic(t *testing.T) { func TestAccAWSCognitoIdentityPool_supportedLoginProviders(t *testing.T) { name := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + resourceName := "aws_cognito_identity_pool.main" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentity(t) }, @@ -77,25 +62,30 @@ func TestAccAWSCognitoIdentityPool_supportedLoginProviders(t *testing.T) { { Config: testAccAWSCognitoIdentityPoolConfig_supportedLoginProviders(name), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSCognitoIdentityPoolExists("aws_cognito_identity_pool.main"), - resource.TestCheckResourceAttr("aws_cognito_identity_pool.main", "identity_pool_name", fmt.Sprintf("identity pool %s", name)), - resource.TestCheckResourceAttr("aws_cognito_identity_pool.main", "supported_login_providers.graph.facebook.com", "7346241598935555"), + testAccCheckAWSCognitoIdentityPoolExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "identity_pool_name", fmt.Sprintf("identity pool %s", name)), + resource.TestCheckResourceAttr(resourceName, "supported_login_providers.graph.facebook.com", "7346241598935555"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSCognitoIdentityPoolConfig_supportedLoginProvidersModified(name), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSCognitoIdentityPoolExists("aws_cognito_identity_pool.main"), - resource.TestCheckResourceAttr("aws_cognito_identity_pool.main", "identity_pool_name", fmt.Sprintf("identity pool %s", name)), - resource.TestCheckResourceAttr("aws_cognito_identity_pool.main", "supported_login_providers.graph.facebook.com", "7346241598935552"), - resource.TestCheckResourceAttr("aws_cognito_identity_pool.main", "supported_login_providers.accounts.google.com", "123456789012.apps.googleusercontent.com"), + testAccCheckAWSCognitoIdentityPoolExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "identity_pool_name", fmt.Sprintf("identity pool %s", name)), + resource.TestCheckResourceAttr(resourceName, "supported_login_providers.graph.facebook.com", "7346241598935552"), + resource.TestCheckResourceAttr(resourceName, "supported_login_providers.accounts.google.com", "123456789012.apps.googleusercontent.com"), ), }, { Config: testAccAWSCognitoIdentityPoolConfig_basic(name), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSCognitoIdentityPoolExists("aws_cognito_identity_pool.main"), - resource.TestCheckResourceAttr("aws_cognito_identity_pool.main", "identity_pool_name", fmt.Sprintf("identity pool %s", name)), + testAccCheckAWSCognitoIdentityPoolExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "identity_pool_name", fmt.Sprintf("identity pool %s", name)), ), }, }, @@ -104,6 +94,7 @@ func TestAccAWSCognitoIdentityPool_supportedLoginProviders(t *testing.T) { func TestAccAWSCognitoIdentityPool_openidConnectProviderArns(t *testing.T) { name := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + resourceName := "aws_cognito_identity_pool.main" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentity(t) }, @@ -113,24 +104,29 @@ func 
TestAccAWSCognitoIdentityPool_openidConnectProviderArns(t *testing.T) { { Config: testAccAWSCognitoIdentityPoolConfig_openidConnectProviderArns(name), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSCognitoIdentityPoolExists("aws_cognito_identity_pool.main"), - resource.TestCheckResourceAttr("aws_cognito_identity_pool.main", "identity_pool_name", fmt.Sprintf("identity pool %s", name)), - resource.TestCheckResourceAttr("aws_cognito_identity_pool.main", "openid_connect_provider_arns.#", "1"), + testAccCheckAWSCognitoIdentityPoolExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "identity_pool_name", fmt.Sprintf("identity pool %s", name)), + resource.TestCheckResourceAttr(resourceName, "openid_connect_provider_arns.#", "1"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSCognitoIdentityPoolConfig_openidConnectProviderArnsModified(name), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSCognitoIdentityPoolExists("aws_cognito_identity_pool.main"), - resource.TestCheckResourceAttr("aws_cognito_identity_pool.main", "identity_pool_name", fmt.Sprintf("identity pool %s", name)), - resource.TestCheckResourceAttr("aws_cognito_identity_pool.main", "openid_connect_provider_arns.#", "2"), + testAccCheckAWSCognitoIdentityPoolExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "identity_pool_name", fmt.Sprintf("identity pool %s", name)), + resource.TestCheckResourceAttr(resourceName, "openid_connect_provider_arns.#", "2"), ), }, { Config: testAccAWSCognitoIdentityPoolConfig_basic(name), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSCognitoIdentityPoolExists("aws_cognito_identity_pool.main"), - resource.TestCheckResourceAttr("aws_cognito_identity_pool.main", "identity_pool_name", fmt.Sprintf("identity pool %s", name)), + testAccCheckAWSCognitoIdentityPoolExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "identity_pool_name", fmt.Sprintf("identity pool %s", name)), ), }, }, @@ -139,6 +135,7 @@ func TestAccAWSCognitoIdentityPool_openidConnectProviderArns(t *testing.T) { func TestAccAWSCognitoIdentityPool_samlProviderArns(t *testing.T) { name := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + resourceName := "aws_cognito_identity_pool.main" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentity(t) }, @@ -148,25 +145,30 @@ func TestAccAWSCognitoIdentityPool_samlProviderArns(t *testing.T) { { Config: testAccAWSCognitoIdentityPoolConfig_samlProviderArns(name), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSCognitoIdentityPoolExists("aws_cognito_identity_pool.main"), - resource.TestCheckResourceAttr("aws_cognito_identity_pool.main", "identity_pool_name", fmt.Sprintf("identity pool %s", name)), - resource.TestCheckResourceAttr("aws_cognito_identity_pool.main", "saml_provider_arns.#", "1"), + testAccCheckAWSCognitoIdentityPoolExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "identity_pool_name", fmt.Sprintf("identity pool %s", name)), + resource.TestCheckResourceAttr(resourceName, "saml_provider_arns.#", "1"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSCognitoIdentityPoolConfig_samlProviderArnsModified(name), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSCognitoIdentityPoolExists("aws_cognito_identity_pool.main"), - 
resource.TestCheckResourceAttr("aws_cognito_identity_pool.main", "identity_pool_name", fmt.Sprintf("identity pool %s", name)), - resource.TestCheckResourceAttr("aws_cognito_identity_pool.main", "saml_provider_arns.#", "1"), + testAccCheckAWSCognitoIdentityPoolExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "identity_pool_name", fmt.Sprintf("identity pool %s", name)), + resource.TestCheckResourceAttr(resourceName, "saml_provider_arns.#", "1"), ), }, { Config: testAccAWSCognitoIdentityPoolConfig_basic(name), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSCognitoIdentityPoolExists("aws_cognito_identity_pool.main"), - resource.TestCheckResourceAttr("aws_cognito_identity_pool.main", "identity_pool_name", fmt.Sprintf("identity pool %s", name)), - resource.TestCheckResourceAttr("aws_cognito_identity_pool.main", "saml_provider_arns.#", "0"), + testAccCheckAWSCognitoIdentityPoolExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "identity_pool_name", fmt.Sprintf("identity pool %s", name)), + resource.TestCheckResourceAttr(resourceName, "saml_provider_arns.#", "0"), ), }, }, @@ -175,6 +177,7 @@ func TestAccAWSCognitoIdentityPool_samlProviderArns(t *testing.T) { func TestAccAWSCognitoIdentityPool_cognitoIdentityProviders(t *testing.T) { name := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + resourceName := "aws_cognito_identity_pool.main" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentity(t) }, @@ -184,31 +187,36 @@ func TestAccAWSCognitoIdentityPool_cognitoIdentityProviders(t *testing.T) { { Config: testAccAWSCognitoIdentityPoolConfig_cognitoIdentityProviders(name), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSCognitoIdentityPoolExists("aws_cognito_identity_pool.main"), - resource.TestCheckResourceAttr("aws_cognito_identity_pool.main", "identity_pool_name", fmt.Sprintf("identity pool %s", name)), - resource.TestCheckResourceAttr("aws_cognito_identity_pool.main", "cognito_identity_providers.66456389.client_id", "7lhlkkfbfb4q5kpp90urffao"), - resource.TestCheckResourceAttr("aws_cognito_identity_pool.main", "cognito_identity_providers.66456389.provider_name", "cognito-idp.us-east-1.amazonaws.com/us-east-1_Zr231apJu"), - resource.TestCheckResourceAttr("aws_cognito_identity_pool.main", "cognito_identity_providers.66456389.server_side_token_check", "false"), - resource.TestCheckResourceAttr("aws_cognito_identity_pool.main", "cognito_identity_providers.3571192419.client_id", "7lhlkkfbfb4q5kpp90urffao"), - resource.TestCheckResourceAttr("aws_cognito_identity_pool.main", "cognito_identity_providers.3571192419.provider_name", "cognito-idp.us-east-1.amazonaws.com/us-east-1_Ab129faBb"), - resource.TestCheckResourceAttr("aws_cognito_identity_pool.main", "cognito_identity_providers.3571192419.server_side_token_check", "false"), + testAccCheckAWSCognitoIdentityPoolExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "identity_pool_name", fmt.Sprintf("identity pool %s", name)), + resource.TestCheckResourceAttr(resourceName, "cognito_identity_providers.66456389.client_id", "7lhlkkfbfb4q5kpp90urffao"), + resource.TestCheckResourceAttr(resourceName, "cognito_identity_providers.66456389.provider_name", "cognito-idp.us-east-1.amazonaws.com/us-east-1_Zr231apJu"), + resource.TestCheckResourceAttr(resourceName, "cognito_identity_providers.66456389.server_side_token_check", "false"), + resource.TestCheckResourceAttr(resourceName, 
"cognito_identity_providers.3571192419.client_id", "7lhlkkfbfb4q5kpp90urffao"), + resource.TestCheckResourceAttr(resourceName, "cognito_identity_providers.3571192419.provider_name", "cognito-idp.us-east-1.amazonaws.com/us-east-1_Ab129faBb"), + resource.TestCheckResourceAttr(resourceName, "cognito_identity_providers.3571192419.server_side_token_check", "false"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSCognitoIdentityPoolConfig_cognitoIdentityProvidersModified(name), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSCognitoIdentityPoolExists("aws_cognito_identity_pool.main"), - resource.TestCheckResourceAttr("aws_cognito_identity_pool.main", "identity_pool_name", fmt.Sprintf("identity pool %s", name)), - resource.TestCheckResourceAttr("aws_cognito_identity_pool.main", "cognito_identity_providers.3661724441.client_id", "6lhlkkfbfb4q5kpp90urffae"), - resource.TestCheckResourceAttr("aws_cognito_identity_pool.main", "cognito_identity_providers.3661724441.provider_name", "cognito-idp.us-east-1.amazonaws.com/us-east-1_Zr231apJu"), - resource.TestCheckResourceAttr("aws_cognito_identity_pool.main", "cognito_identity_providers.3661724441.server_side_token_check", "false"), + testAccCheckAWSCognitoIdentityPoolExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "identity_pool_name", fmt.Sprintf("identity pool %s", name)), + resource.TestCheckResourceAttr(resourceName, "cognito_identity_providers.3661724441.client_id", "6lhlkkfbfb4q5kpp90urffae"), + resource.TestCheckResourceAttr(resourceName, "cognito_identity_providers.3661724441.provider_name", "cognito-idp.us-east-1.amazonaws.com/us-east-1_Zr231apJu"), + resource.TestCheckResourceAttr(resourceName, "cognito_identity_providers.3661724441.server_side_token_check", "false"), ), }, { Config: testAccAWSCognitoIdentityPoolConfig_basic(name), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSCognitoIdentityPoolExists("aws_cognito_identity_pool.main"), - resource.TestCheckResourceAttr("aws_cognito_identity_pool.main", "identity_pool_name", fmt.Sprintf("identity pool %s", name)), + testAccCheckAWSCognitoIdentityPoolExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "identity_pool_name", fmt.Sprintf("identity pool %s", name)), ), }, }, @@ -217,6 +225,7 @@ func TestAccAWSCognitoIdentityPool_cognitoIdentityProviders(t *testing.T) { func TestAccAWSCognitoIdentityPool_addingNewProviderKeepsOldProvider(t *testing.T) { name := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + resourceName := "aws_cognito_identity_pool.main" resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentity(t) }, @@ -226,27 +235,32 @@ func TestAccAWSCognitoIdentityPool_addingNewProviderKeepsOldProvider(t *testing. 
{ Config: testAccAWSCognitoIdentityPoolConfig_cognitoIdentityProviders(name), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSCognitoIdentityPoolExists("aws_cognito_identity_pool.main"), - resource.TestCheckResourceAttr("aws_cognito_identity_pool.main", "identity_pool_name", fmt.Sprintf("identity pool %s", name)), - resource.TestCheckResourceAttr("aws_cognito_identity_pool.main", "cognito_identity_providers.#", "2"), + testAccCheckAWSCognitoIdentityPoolExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "identity_pool_name", fmt.Sprintf("identity pool %s", name)), + resource.TestCheckResourceAttr(resourceName, "cognito_identity_providers.#", "2"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSCognitoIdentityPoolConfig_cognitoIdentityProvidersAndOpenidConnectProviderArns(name), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSCognitoIdentityPoolExists("aws_cognito_identity_pool.main"), - resource.TestCheckResourceAttr("aws_cognito_identity_pool.main", "identity_pool_name", fmt.Sprintf("identity pool %s", name)), - resource.TestCheckResourceAttr("aws_cognito_identity_pool.main", "cognito_identity_providers.#", "2"), - resource.TestCheckResourceAttr("aws_cognito_identity_pool.main", "openid_connect_provider_arns.#", "1"), + testAccCheckAWSCognitoIdentityPoolExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "identity_pool_name", fmt.Sprintf("identity pool %s", name)), + resource.TestCheckResourceAttr(resourceName, "cognito_identity_providers.#", "2"), + resource.TestCheckResourceAttr(resourceName, "openid_connect_provider_arns.#", "1"), ), }, { Config: testAccAWSCognitoIdentityPoolConfig_basic(name), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSCognitoIdentityPoolExists("aws_cognito_identity_pool.main"), - resource.TestCheckResourceAttr("aws_cognito_identity_pool.main", "identity_pool_name", fmt.Sprintf("identity pool %s", name)), - resource.TestCheckResourceAttr("aws_cognito_identity_pool.main", "cognito_identity_providers.#", "0"), - resource.TestCheckResourceAttr("aws_cognito_identity_pool.main", "openid_connect_provider_arns.#", "0"), + testAccCheckAWSCognitoIdentityPoolExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "identity_pool_name", fmt.Sprintf("identity pool %s", name)), + resource.TestCheckResourceAttr(resourceName, "cognito_identity_providers.#", "0"), + resource.TestCheckResourceAttr(resourceName, "openid_connect_provider_arns.#", "0"), ), }, }, @@ -255,6 +269,7 @@ func TestAccAWSCognitoIdentityPool_addingNewProviderKeepsOldProvider(t *testing. 
func TestAccAWSCognitoIdentityPoolWithTags(t *testing.T) { name := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + resourceName := "aws_cognito_identity_pool.main" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentity(t) }, @@ -264,16 +279,21 @@ func TestAccAWSCognitoIdentityPoolWithTags(t *testing.T) { { Config: testAccAWSCognitoIdentityPoolConfigWithTags(name), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSCognitoIdentityPoolExists("aws_cognito_identity_pool.main"), - resource.TestCheckResourceAttr("aws_cognito_identity_pool.main", "tags.environment", "dev"), + testAccCheckAWSCognitoIdentityPoolExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "tags.environment", "dev"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSCognitoIdentityPoolConfigWithTagsUpdated(name), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSCognitoIdentityPoolExists("aws_cognito_identity_pool.main"), - resource.TestCheckResourceAttr("aws_cognito_identity_pool.main", "tags.environment", "dev"), - resource.TestCheckResourceAttr("aws_cognito_identity_pool.main", "tags.project", "Terraform"), + testAccCheckAWSCognitoIdentityPoolExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "tags.environment", "dev"), + resource.TestCheckResourceAttr(resourceName, "tags.project", "Terraform"), ), }, }, diff --git a/aws/resource_aws_cognito_user_group_test.go b/aws/resource_aws_cognito_user_group_test.go index 7b5df1e410f..41ebf62abcb 100644 --- a/aws/resource_aws_cognito_user_group_test.go +++ b/aws/resource_aws_cognito_user_group_test.go @@ -17,6 +17,7 @@ func TestAccAWSCognitoUserGroup_basic(t *testing.T) { poolName := fmt.Sprintf("tf-acc-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) groupName := fmt.Sprintf("tf-acc-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) updatedGroupName := fmt.Sprintf("tf-acc-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + resourceName := "aws_cognito_user_group.main" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, @@ -26,15 +27,20 @@ func TestAccAWSCognitoUserGroup_basic(t *testing.T) { { Config: testAccAWSCognitoUserGroupConfig_basic(poolName, groupName), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSCognitoUserGroupExists("aws_cognito_user_group.main"), - resource.TestCheckResourceAttr("aws_cognito_user_group.main", "name", groupName), + testAccCheckAWSCognitoUserGroupExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "name", groupName), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSCognitoUserGroupConfig_basic(poolName, updatedGroupName), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSCognitoUserGroupExists("aws_cognito_user_group.main"), - resource.TestCheckResourceAttr("aws_cognito_user_group.main", "name", updatedGroupName), + testAccCheckAWSCognitoUserGroupExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "name", updatedGroupName), ), }, }, @@ -45,6 +51,7 @@ func TestAccAWSCognitoUserGroup_complex(t *testing.T) { poolName := fmt.Sprintf("tf-acc-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) groupName := fmt.Sprintf("tf-acc-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) 
updatedGroupName := fmt.Sprintf("tf-acc-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) + resourceName := "aws_cognito_user_group.main" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, @@ -54,21 +61,26 @@ func TestAccAWSCognitoUserGroup_complex(t *testing.T) { { Config: testAccAWSCognitoUserGroupConfig_complex(poolName, groupName, "This is the user group description", 1), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSCognitoUserGroupExists("aws_cognito_user_group.main"), - resource.TestCheckResourceAttr("aws_cognito_user_group.main", "name", groupName), - resource.TestCheckResourceAttr("aws_cognito_user_group.main", "description", "This is the user group description"), - resource.TestCheckResourceAttr("aws_cognito_user_group.main", "precedence", "1"), - resource.TestCheckResourceAttrSet("aws_cognito_user_group.main", "role_arn"), + testAccCheckAWSCognitoUserGroupExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "name", groupName), + resource.TestCheckResourceAttr(resourceName, "description", "This is the user group description"), + resource.TestCheckResourceAttr(resourceName, "precedence", "1"), + resource.TestCheckResourceAttrSet(resourceName, "role_arn"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSCognitoUserGroupConfig_complex(poolName, updatedGroupName, "This is the updated user group description", 42), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSCognitoUserGroupExists("aws_cognito_user_group.main"), - resource.TestCheckResourceAttr("aws_cognito_user_group.main", "name", updatedGroupName), - resource.TestCheckResourceAttr("aws_cognito_user_group.main", "description", "This is the updated user group description"), - resource.TestCheckResourceAttr("aws_cognito_user_group.main", "precedence", "42"), - resource.TestCheckResourceAttrSet("aws_cognito_user_group.main", "role_arn"), + testAccCheckAWSCognitoUserGroupExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "name", updatedGroupName), + resource.TestCheckResourceAttr(resourceName, "description", "This is the updated user group description"), + resource.TestCheckResourceAttr(resourceName, "precedence", "42"), + resource.TestCheckResourceAttrSet(resourceName, "role_arn"), ), }, }, @@ -91,6 +103,11 @@ func TestAccAWSCognitoUserGroup_RoleArn(t *testing.T) { resource.TestCheckResourceAttrSet(resourceName, "role_arn"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSCognitoUserGroupConfig_RoleArn_Updated(rName), Check: resource.ComposeAggregateTestCheckFunc( @@ -102,28 +119,6 @@ func TestAccAWSCognitoUserGroup_RoleArn(t *testing.T) { }) } -func TestAccAWSCognitoUserGroup_importBasic(t *testing.T) { - resourceName := "aws_cognito_user_group.main" - poolName := fmt.Sprintf("tf-acc-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - groupName := fmt.Sprintf("tf-acc-%s", acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCognitoUserGroupDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSCognitoUserGroupConfig_basic(poolName, groupName), - }, - { - ResourceName: resourceName, - ImportState: true, - 
ImportStateVerify: true, - }, - }, - }) -} - func testAccCheckAWSCognitoUserGroupExists(name string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[name] diff --git a/aws/resource_aws_cognito_user_pool_client_test.go b/aws/resource_aws_cognito_user_pool_client_test.go index 140a2828044..f8aa2c96ea6 100644 --- a/aws/resource_aws_cognito_user_pool_client_test.go +++ b/aws/resource_aws_cognito_user_pool_client_test.go @@ -16,6 +16,7 @@ import ( func TestAccAWSCognitoUserPoolClient_basic(t *testing.T) { userPoolName := fmt.Sprintf("tf-acc-cognito-user-pool-%s", acctest.RandString(7)) clientName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + resourceName := "aws_cognito_user_pool_client.client" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, @@ -25,62 +26,15 @@ func TestAccAWSCognitoUserPoolClient_basic(t *testing.T) { { Config: testAccAWSCognitoUserPoolClientConfig_basic(userPoolName, clientName), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSCognitoUserPoolClientExists("aws_cognito_user_pool_client.client"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "name", clientName), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "explicit_auth_flows.#", "1"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "explicit_auth_flows.245201344", "ADMIN_NO_SRP_AUTH"), + testAccCheckAWSCognitoUserPoolClientExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "name", clientName), + resource.TestCheckResourceAttr(resourceName, "explicit_auth_flows.#", "1"), + resource.TestCheckResourceAttr(resourceName, "explicit_auth_flows.245201344", "ADMIN_NO_SRP_AUTH"), ), }, - }, - }) -} - -func TestAccAWSCognitoUserPoolClient_importBasic(t *testing.T) { - userPoolName := fmt.Sprintf("tf-acc-cognito-user-pool-%s", acctest.RandString(7)) - clientName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) - - resourceName := "aws_cognito_user_pool_client.client" - - getStateId := func(s *terraform.State) (string, error) { - - rs, ok := s.RootModule().Resources[resourceName] - if !ok { - return "", fmt.Errorf("Not found: %s", resourceName) - } - - if rs.Primary.ID == "" { - return "", errors.New("No Cognito User Pool Client ID set") - } - - conn := testAccProvider.Meta().(*AWSClient).cognitoidpconn - userPoolId := rs.Primary.Attributes["user_pool_id"] - clientId := rs.Primary.ID - - params := &cognitoidentityprovider.DescribeUserPoolClientInput{ - UserPoolId: aws.String(userPoolId), - ClientId: aws.String(clientId), - } - - _, err := conn.DescribeUserPoolClient(params) - - if err != nil { - return "", err - } - - return fmt.Sprintf("%s/%s", userPoolId, clientId), nil - } - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSEcsServiceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSCognitoUserPoolClientConfig_basic(userPoolName, clientName), - }, { ResourceName: resourceName, - ImportStateIdFunc: getStateId, + ImportStateIdFunc: testAccAWSCognitoUserPoolClientImportStateIDFunc(resourceName), ImportState: true, ImportStateVerify: true, }, @@ -90,6 +44,7 @@ func TestAccAWSCognitoUserPoolClient_importBasic(t *testing.T) { func TestAccAWSCognitoUserPoolClient_RefreshTokenValidity(t *testing.T) { rName := 
acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_cognito_user_pool_client.client" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, @@ -99,15 +54,21 @@ func TestAccAWSCognitoUserPoolClient_RefreshTokenValidity(t *testing.T) { { Config: testAccAWSCognitoUserPoolClientConfig_RefreshTokenValidity(rName, 60), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSCognitoUserPoolClientExists("aws_cognito_user_pool_client.client"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "refresh_token_validity", "60"), + testAccCheckAWSCognitoUserPoolClientExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "refresh_token_validity", "60"), ), }, + { + ResourceName: resourceName, + ImportStateIdFunc: testAccAWSCognitoUserPoolClientImportStateIDFunc(resourceName), + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSCognitoUserPoolClientConfig_RefreshTokenValidity(rName, 120), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSCognitoUserPoolClientExists("aws_cognito_user_pool_client.client"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "refresh_token_validity", "120"), + testAccCheckAWSCognitoUserPoolClientExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "refresh_token_validity", "120"), ), }, }, @@ -130,6 +91,12 @@ func TestAccAWSCognitoUserPoolClient_Name(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "name", "name1"), ), }, + { + ResourceName: resourceName, + ImportStateIdFunc: testAccAWSCognitoUserPoolClientImportStateIDFunc(resourceName), + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSCognitoUserPoolClientConfig_Name(rName, "name2"), Check: resource.ComposeAggregateTestCheckFunc( @@ -144,6 +111,7 @@ func TestAccAWSCognitoUserPoolClient_Name(t *testing.T) { func TestAccAWSCognitoUserPoolClient_allFields(t *testing.T) { userPoolName := fmt.Sprintf("tf-acc-cognito-user-pool-%s", acctest.RandString(7)) clientName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + resourceName := "aws_cognito_user_pool_client.client" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, @@ -153,36 +121,43 @@ func TestAccAWSCognitoUserPoolClient_allFields(t *testing.T) { { Config: testAccAWSCognitoUserPoolClientConfig_allFields(userPoolName, clientName, 300), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSCognitoUserPoolClientExists("aws_cognito_user_pool_client.client"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "name", clientName), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "explicit_auth_flows.#", "3"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "explicit_auth_flows.1728632605", "CUSTOM_AUTH_FLOW_ONLY"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "explicit_auth_flows.1860959087", "USER_PASSWORD_AUTH"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "explicit_auth_flows.245201344", "ADMIN_NO_SRP_AUTH"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "generate_secret", "true"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "read_attributes.#", "1"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "read_attributes.881205744", "email"), - 
resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "write_attributes.#", "1"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "write_attributes.881205744", "email"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "refresh_token_validity", "300"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "allowed_oauth_flows.#", "2"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "allowed_oauth_flows.2645166319", "code"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "allowed_oauth_flows.3465961881", "implicit"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "allowed_oauth_flows_user_pool_client", "true"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "allowed_oauth_scopes.#", "5"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "allowed_oauth_scopes.2517049750", "openid"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "allowed_oauth_scopes.881205744", "email"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "allowed_oauth_scopes.2603607895", "phone"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "allowed_oauth_scopes.380129571", "aws.cognito.signin.user.admin"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "allowed_oauth_scopes.4080487570", "profile"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "callback_urls.#", "2"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "callback_urls.0", "https://www.example.com/callback"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "callback_urls.1", "https://www.example.com/redirect"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "default_redirect_uri", "https://www.example.com/redirect"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "logout_urls.#", "1"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "logout_urls.0", "https://www.example.com/login"), + testAccCheckAWSCognitoUserPoolClientExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "name", clientName), + resource.TestCheckResourceAttr(resourceName, "explicit_auth_flows.#", "3"), + resource.TestCheckResourceAttr(resourceName, "explicit_auth_flows.1728632605", "CUSTOM_AUTH_FLOW_ONLY"), + resource.TestCheckResourceAttr(resourceName, "explicit_auth_flows.1860959087", "USER_PASSWORD_AUTH"), + resource.TestCheckResourceAttr(resourceName, "explicit_auth_flows.245201344", "ADMIN_NO_SRP_AUTH"), + resource.TestCheckResourceAttr(resourceName, "generate_secret", "true"), + resource.TestCheckResourceAttr(resourceName, "read_attributes.#", "1"), + resource.TestCheckResourceAttr(resourceName, "read_attributes.881205744", "email"), + resource.TestCheckResourceAttr(resourceName, "write_attributes.#", "1"), + resource.TestCheckResourceAttr(resourceName, "write_attributes.881205744", "email"), + resource.TestCheckResourceAttr(resourceName, "refresh_token_validity", "300"), + resource.TestCheckResourceAttr(resourceName, "allowed_oauth_flows.#", "2"), + resource.TestCheckResourceAttr(resourceName, "allowed_oauth_flows.2645166319", "code"), + resource.TestCheckResourceAttr(resourceName, "allowed_oauth_flows.3465961881", "implicit"), + resource.TestCheckResourceAttr(resourceName, "allowed_oauth_flows_user_pool_client", "true"), + 
resource.TestCheckResourceAttr(resourceName, "allowed_oauth_scopes.#", "5"), + resource.TestCheckResourceAttr(resourceName, "allowed_oauth_scopes.2517049750", "openid"), + resource.TestCheckResourceAttr(resourceName, "allowed_oauth_scopes.881205744", "email"), + resource.TestCheckResourceAttr(resourceName, "allowed_oauth_scopes.2603607895", "phone"), + resource.TestCheckResourceAttr(resourceName, "allowed_oauth_scopes.380129571", "aws.cognito.signin.user.admin"), + resource.TestCheckResourceAttr(resourceName, "allowed_oauth_scopes.4080487570", "profile"), + resource.TestCheckResourceAttr(resourceName, "callback_urls.#", "2"), + resource.TestCheckResourceAttr(resourceName, "callback_urls.0", "https://www.example.com/callback"), + resource.TestCheckResourceAttr(resourceName, "callback_urls.1", "https://www.example.com/redirect"), + resource.TestCheckResourceAttr(resourceName, "default_redirect_uri", "https://www.example.com/redirect"), + resource.TestCheckResourceAttr(resourceName, "logout_urls.#", "1"), + resource.TestCheckResourceAttr(resourceName, "logout_urls.0", "https://www.example.com/login"), ), }, + { + ResourceName: resourceName, + ImportStateIdFunc: testAccAWSCognitoUserPoolClientImportStateIDFunc(resourceName), + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"generate_secret"}, + }, }, }) } @@ -190,6 +165,7 @@ func TestAccAWSCognitoUserPoolClient_allFields(t *testing.T) { func TestAccAWSCognitoUserPoolClient_allFieldsUpdatingOneField(t *testing.T) { userPoolName := fmt.Sprintf("tf-acc-cognito-user-pool-%s", acctest.RandString(7)) clientName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + resourceName := "aws_cognito_user_pool_client.client" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, @@ -202,40 +178,78 @@ func TestAccAWSCognitoUserPoolClient_allFieldsUpdatingOneField(t *testing.T) { { Config: testAccAWSCognitoUserPoolClientConfig_allFields(userPoolName, clientName, 299), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSCognitoUserPoolClientExists("aws_cognito_user_pool_client.client"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "name", clientName), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "explicit_auth_flows.#", "3"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "explicit_auth_flows.1728632605", "CUSTOM_AUTH_FLOW_ONLY"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "explicit_auth_flows.1860959087", "USER_PASSWORD_AUTH"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "explicit_auth_flows.245201344", "ADMIN_NO_SRP_AUTH"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "generate_secret", "true"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "read_attributes.#", "1"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "read_attributes.881205744", "email"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "write_attributes.#", "1"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "write_attributes.881205744", "email"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "refresh_token_validity", "299"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "allowed_oauth_flows.#", "2"), - 
resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "allowed_oauth_flows.2645166319", "code"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "allowed_oauth_flows.3465961881", "implicit"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "allowed_oauth_flows_user_pool_client", "true"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "allowed_oauth_scopes.#", "5"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "allowed_oauth_scopes.2517049750", "openid"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "allowed_oauth_scopes.881205744", "email"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "allowed_oauth_scopes.2603607895", "phone"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "allowed_oauth_scopes.380129571", "aws.cognito.signin.user.admin"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "allowed_oauth_scopes.4080487570", "profile"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "callback_urls.#", "2"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "callback_urls.0", "https://www.example.com/callback"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "callback_urls.1", "https://www.example.com/redirect"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "default_redirect_uri", "https://www.example.com/redirect"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "logout_urls.#", "1"), - resource.TestCheckResourceAttr("aws_cognito_user_pool_client.client", "logout_urls.0", "https://www.example.com/login"), + testAccCheckAWSCognitoUserPoolClientExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "name", clientName), + resource.TestCheckResourceAttr(resourceName, "explicit_auth_flows.#", "3"), + resource.TestCheckResourceAttr(resourceName, "explicit_auth_flows.1728632605", "CUSTOM_AUTH_FLOW_ONLY"), + resource.TestCheckResourceAttr(resourceName, "explicit_auth_flows.1860959087", "USER_PASSWORD_AUTH"), + resource.TestCheckResourceAttr(resourceName, "explicit_auth_flows.245201344", "ADMIN_NO_SRP_AUTH"), + resource.TestCheckResourceAttr(resourceName, "generate_secret", "true"), + resource.TestCheckResourceAttr(resourceName, "read_attributes.#", "1"), + resource.TestCheckResourceAttr(resourceName, "read_attributes.881205744", "email"), + resource.TestCheckResourceAttr(resourceName, "write_attributes.#", "1"), + resource.TestCheckResourceAttr(resourceName, "write_attributes.881205744", "email"), + resource.TestCheckResourceAttr(resourceName, "refresh_token_validity", "299"), + resource.TestCheckResourceAttr(resourceName, "allowed_oauth_flows.#", "2"), + resource.TestCheckResourceAttr(resourceName, "allowed_oauth_flows.2645166319", "code"), + resource.TestCheckResourceAttr(resourceName, "allowed_oauth_flows.3465961881", "implicit"), + resource.TestCheckResourceAttr(resourceName, "allowed_oauth_flows_user_pool_client", "true"), + resource.TestCheckResourceAttr(resourceName, "allowed_oauth_scopes.#", "5"), + resource.TestCheckResourceAttr(resourceName, "allowed_oauth_scopes.2517049750", "openid"), + resource.TestCheckResourceAttr(resourceName, "allowed_oauth_scopes.881205744", "email"), + resource.TestCheckResourceAttr(resourceName, "allowed_oauth_scopes.2603607895", "phone"), + resource.TestCheckResourceAttr(resourceName, 
"allowed_oauth_scopes.380129571", "aws.cognito.signin.user.admin"), + resource.TestCheckResourceAttr(resourceName, "allowed_oauth_scopes.4080487570", "profile"), + resource.TestCheckResourceAttr(resourceName, "callback_urls.#", "2"), + resource.TestCheckResourceAttr(resourceName, "callback_urls.0", "https://www.example.com/callback"), + resource.TestCheckResourceAttr(resourceName, "callback_urls.1", "https://www.example.com/redirect"), + resource.TestCheckResourceAttr(resourceName, "default_redirect_uri", "https://www.example.com/redirect"), + resource.TestCheckResourceAttr(resourceName, "logout_urls.#", "1"), + resource.TestCheckResourceAttr(resourceName, "logout_urls.0", "https://www.example.com/login"), ), }, + { + ResourceName: resourceName, + ImportStateIdFunc: testAccAWSCognitoUserPoolClientImportStateIDFunc(resourceName), + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"generate_secret"}, + }, }, }) } +func testAccAWSCognitoUserPoolClientImportStateIDFunc(resourceName string) resource.ImportStateIdFunc { + return func(s *terraform.State) (string, error) { + + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return "", fmt.Errorf("Not found: %s", resourceName) + } + + if rs.Primary.ID == "" { + return "", errors.New("No Cognito User Pool Client ID set") + } + + conn := testAccProvider.Meta().(*AWSClient).cognitoidpconn + userPoolId := rs.Primary.Attributes["user_pool_id"] + clientId := rs.Primary.ID + + params := &cognitoidentityprovider.DescribeUserPoolClientInput{ + UserPoolId: aws.String(userPoolId), + ClientId: aws.String(clientId), + } + + _, err := conn.DescribeUserPoolClient(params) + + if err != nil { + return "", err + } + + return fmt.Sprintf("%s/%s", userPoolId, clientId), nil + } +} + func testAccCheckAWSCognitoUserPoolClientDestroy(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).cognitoidpconn diff --git a/aws/resource_aws_cognito_user_pool_test.go b/aws/resource_aws_cognito_user_pool_test.go index 0e2d3dba7b0..36790226b13 100644 --- a/aws/resource_aws_cognito_user_pool_test.go +++ b/aws/resource_aws_cognito_user_pool_test.go @@ -70,29 +70,9 @@ func testSweepCognitoUserPools(region string) error { return nil } -func TestAccAWSCognitoUserPool_importBasic(t *testing.T) { - resourceName := "aws_cognito_user_pool.pool" - name := acctest.RandString(5) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCloudWatchDashboardDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSCognitoUserPoolConfig_basic(name), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - func TestAccAWSCognitoUserPool_basic(t *testing.T) { name := acctest.RandString(5) + resourceName := "aws_cognito_user_pool.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, @@ -102,22 +82,28 @@ func TestAccAWSCognitoUserPool_basic(t *testing.T) { { Config: testAccAWSCognitoUserPoolConfig_basic(name), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSCognitoUserPoolExists("aws_cognito_user_pool.pool"), - resource.TestMatchResourceAttr("aws_cognito_user_pool.pool", "arn", + testAccCheckAWSCognitoUserPoolExists(resourceName), + resource.TestMatchResourceAttr(resourceName, "arn", 
regexp.MustCompile(`^arn:aws:cognito-idp:[^:]+:[0-9]{12}:userpool/[\w-]+_[0-9a-zA-Z]+$`)), - resource.TestMatchResourceAttr("aws_cognito_user_pool.pool", "endpoint", + resource.TestMatchResourceAttr(resourceName, "endpoint", regexp.MustCompile(`^cognito-idp\.[^.]+\.amazonaws.com/[\w-]+_[0-9a-zA-Z]+$`)), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "name", "terraform-test-pool-"+name), - resource.TestCheckResourceAttrSet("aws_cognito_user_pool.pool", "creation_date"), - resource.TestCheckResourceAttrSet("aws_cognito_user_pool.pool", "last_modified_date"), + resource.TestCheckResourceAttr(resourceName, "name", "terraform-test-pool-"+name), + resource.TestCheckResourceAttrSet(resourceName, "creation_date"), + resource.TestCheckResourceAttrSet(resourceName, "last_modified_date"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } func TestAccAWSCognitoUserPool_withAdminCreateUserConfiguration(t *testing.T) { name := acctest.RandString(5) + resourceName := "aws_cognito_user_pool.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, @@ -127,22 +113,27 @@ func TestAccAWSCognitoUserPool_withAdminCreateUserConfiguration(t *testing.T) { { Config: testAccAWSCognitoUserPoolConfig_withAdminCreateUserConfiguration(name), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSCognitoUserPoolExists("aws_cognito_user_pool.pool"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "admin_create_user_config.0.unused_account_validity_days", "6"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "admin_create_user_config.0.allow_admin_create_user_only", "true"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "admin_create_user_config.0.invite_message_template.0.email_message", "Your username is {username} and temporary password is {####}. "), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "admin_create_user_config.0.invite_message_template.0.email_subject", "FooBar {####}"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "admin_create_user_config.0.invite_message_template.0.sms_message", "Your username is {username} and temporary password is {####}."), + testAccCheckAWSCognitoUserPoolExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "admin_create_user_config.0.unused_account_validity_days", "6"), + resource.TestCheckResourceAttr(resourceName, "admin_create_user_config.0.allow_admin_create_user_only", "true"), + resource.TestCheckResourceAttr(resourceName, "admin_create_user_config.0.invite_message_template.0.email_message", "Your username is {username} and temporary password is {####}. 
"), + resource.TestCheckResourceAttr(resourceName, "admin_create_user_config.0.invite_message_template.0.email_subject", "FooBar {####}"), + resource.TestCheckResourceAttr(resourceName, "admin_create_user_config.0.invite_message_template.0.sms_message", "Your username is {username} and temporary password is {####}."), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSCognitoUserPoolConfig_withAdminCreateUserConfigurationUpdated(name), Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "admin_create_user_config.0.unused_account_validity_days", "7"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "admin_create_user_config.0.allow_admin_create_user_only", "false"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "admin_create_user_config.0.invite_message_template.0.email_message", "Your username is {username} and constant password is {####}. "), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "admin_create_user_config.0.invite_message_template.0.email_subject", "Foo{####}BaBaz"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "admin_create_user_config.0.invite_message_template.0.sms_message", "Your username is {username} and constant password is {####}."), + resource.TestCheckResourceAttr(resourceName, "admin_create_user_config.0.unused_account_validity_days", "7"), + resource.TestCheckResourceAttr(resourceName, "admin_create_user_config.0.allow_admin_create_user_only", "false"), + resource.TestCheckResourceAttr(resourceName, "admin_create_user_config.0.invite_message_template.0.email_message", "Your username is {username} and constant password is {####}. "), + resource.TestCheckResourceAttr(resourceName, "admin_create_user_config.0.invite_message_template.0.email_subject", "Foo{####}BaBaz"), + resource.TestCheckResourceAttr(resourceName, "admin_create_user_config.0.invite_message_template.0.sms_message", "Your username is {username} and constant password is {####}."), ), }, }, @@ -151,6 +142,7 @@ func TestAccAWSCognitoUserPool_withAdminCreateUserConfiguration(t *testing.T) { func TestAccAWSCognitoUserPool_withAdvancedSecurityMode(t *testing.T) { name := acctest.RandString(5) + resourceName := "aws_cognito_user_pool.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, @@ -160,20 +152,25 @@ func TestAccAWSCognitoUserPool_withAdvancedSecurityMode(t *testing.T) { { Config: testAccAWSCognitoUserPoolConfig_withAdvancedSecurityMode(name, "OFF"), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSCognitoUserPoolExists("aws_cognito_user_pool.pool"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "user_pool_add_ons.0.advanced_security_mode", "OFF"), + testAccCheckAWSCognitoUserPoolExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "user_pool_add_ons.0.advanced_security_mode", "OFF"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSCognitoUserPoolConfig_withAdvancedSecurityMode(name, "ENFORCED"), Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "user_pool_add_ons.0.advanced_security_mode", "ENFORCED"), + resource.TestCheckResourceAttr(resourceName, "user_pool_add_ons.0.advanced_security_mode", "ENFORCED"), ), }, { Config: testAccAWSCognitoUserPoolConfig_basic(name), 
Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "user_pool_add_ons.#", "0"), + resource.TestCheckResourceAttr(resourceName, "user_pool_add_ons.#", "0"), ), }, }, @@ -182,6 +179,7 @@ func TestAccAWSCognitoUserPool_withAdvancedSecurityMode(t *testing.T) { func TestAccAWSCognitoUserPool_withDeviceConfiguration(t *testing.T) { name := acctest.RandString(5) + resourceName := "aws_cognito_user_pool.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, @@ -191,16 +189,21 @@ func TestAccAWSCognitoUserPool_withDeviceConfiguration(t *testing.T) { { Config: testAccAWSCognitoUserPoolConfig_withDeviceConfiguration(name), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSCognitoUserPoolExists("aws_cognito_user_pool.pool"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "device_configuration.0.challenge_required_on_new_device", "true"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "device_configuration.0.device_only_remembered_on_user_prompt", "false"), + testAccCheckAWSCognitoUserPoolExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "device_configuration.0.challenge_required_on_new_device", "true"), + resource.TestCheckResourceAttr(resourceName, "device_configuration.0.device_only_remembered_on_user_prompt", "false"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSCognitoUserPoolConfig_withDeviceConfigurationUpdated(name), Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "device_configuration.0.challenge_required_on_new_device", "false"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "device_configuration.0.device_only_remembered_on_user_prompt", "true"), + resource.TestCheckResourceAttr(resourceName, "device_configuration.0.challenge_required_on_new_device", "false"), + resource.TestCheckResourceAttr(resourceName, "device_configuration.0.device_only_remembered_on_user_prompt", "true"), ), }, }, @@ -213,6 +216,7 @@ func TestAccAWSCognitoUserPool_withEmailVerificationMessage(t *testing.T) { updatedSubject := acctest.RandString(10) message := fmt.Sprintf("%s {####}", acctest.RandString(10)) upatedMessage := fmt.Sprintf("%s {####}", acctest.RandString(10)) + resourceName := "aws_cognito_user_pool.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, @@ -222,16 +226,21 @@ func TestAccAWSCognitoUserPool_withEmailVerificationMessage(t *testing.T) { { Config: testAccAWSCognitoUserPoolConfig_withEmailVerificationMessage(name, subject, message), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSCognitoUserPoolExists("aws_cognito_user_pool.pool"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "email_verification_subject", subject), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "email_verification_message", message), + testAccCheckAWSCognitoUserPoolExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "email_verification_subject", subject), + resource.TestCheckResourceAttr(resourceName, "email_verification_message", message), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSCognitoUserPoolConfig_withEmailVerificationMessage(name, updatedSubject, 
upatedMessage), Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "email_verification_subject", updatedSubject), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "email_verification_message", upatedMessage), + resource.TestCheckResourceAttr(resourceName, "email_verification_subject", updatedSubject), + resource.TestCheckResourceAttr(resourceName, "email_verification_message", upatedMessage), ), }, }, @@ -244,6 +253,7 @@ func TestAccAWSCognitoUserPool_withSmsVerificationMessage(t *testing.T) { updatedAuthenticationMessage := fmt.Sprintf("%s {####}", acctest.RandString(10)) verificationMessage := fmt.Sprintf("%s {####}", acctest.RandString(10)) upatedVerificationMessage := fmt.Sprintf("%s {####}", acctest.RandString(10)) + resourceName := "aws_cognito_user_pool.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, @@ -253,16 +263,21 @@ func TestAccAWSCognitoUserPool_withSmsVerificationMessage(t *testing.T) { { Config: testAccAWSCognitoUserPoolConfig_withSmsVerificationMessage(name, authenticationMessage, verificationMessage), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSCognitoUserPoolExists("aws_cognito_user_pool.pool"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "sms_authentication_message", authenticationMessage), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "sms_verification_message", verificationMessage), + testAccCheckAWSCognitoUserPoolExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "sms_authentication_message", authenticationMessage), + resource.TestCheckResourceAttr(resourceName, "sms_verification_message", verificationMessage), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSCognitoUserPoolConfig_withSmsVerificationMessage(name, updatedAuthenticationMessage, upatedVerificationMessage), Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "sms_authentication_message", updatedAuthenticationMessage), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "sms_verification_message", upatedVerificationMessage), + resource.TestCheckResourceAttr(resourceName, "sms_authentication_message", updatedAuthenticationMessage), + resource.TestCheckResourceAttr(resourceName, "sms_verification_message", upatedVerificationMessage), ), }, }, @@ -272,6 +287,7 @@ func TestAccAWSCognitoUserPool_withSmsVerificationMessage(t *testing.T) { func TestAccAWSCognitoUserPool_withEmailConfiguration(t *testing.T) { name := acctest.RandString(5) replyTo := fmt.Sprintf("tf-acc-reply-%s@terraformtesting.com", name) + resourceName := "aws_cognito_user_pool.test" sourceARN, ok := os.LookupEnv("TEST_AWS_SES_VERIFIED_EMAIL_ARN") if !ok { @@ -286,18 +302,23 @@ func TestAccAWSCognitoUserPool_withEmailConfiguration(t *testing.T) { { Config: testAccAWSCognitoUserPoolConfig_withEmailConfiguration(name, "", "", "COGNITO_DEFAULT"), Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "email_configuration.#", "1"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "email_configuration.0.reply_to_email_address", ""), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "email_configuration.0.email_sending_account", "COGNITO_DEFAULT"), + resource.TestCheckResourceAttr(resourceName, 
"email_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "email_configuration.0.reply_to_email_address", ""), + resource.TestCheckResourceAttr(resourceName, "email_configuration.0.email_sending_account", "COGNITO_DEFAULT"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSCognitoUserPoolConfig_withEmailConfiguration(name, replyTo, sourceARN, "DEVELOPER"), Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "email_configuration.#", "1"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "email_configuration.0.reply_to_email_address", replyTo), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "email_configuration.0.email_sending_account", "DEVELOPER"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "email_configuration.0.source_arn", sourceARN), + resource.TestCheckResourceAttr(resourceName, "email_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "email_configuration.0.reply_to_email_address", replyTo), + resource.TestCheckResourceAttr(resourceName, "email_configuration.0.email_sending_account", "DEVELOPER"), + resource.TestCheckResourceAttr(resourceName, "email_configuration.0.source_arn", sourceARN), ), }, }, @@ -308,6 +329,7 @@ func TestAccAWSCognitoUserPool_withEmailConfiguration(t *testing.T) { // taking some time. func TestAccAWSCognitoUserPool_withSmsConfiguration(t *testing.T) { name := acctest.RandString(5) + resourceName := "aws_cognito_user_pool.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, @@ -317,11 +339,16 @@ func TestAccAWSCognitoUserPool_withSmsConfiguration(t *testing.T) { { Config: testAccAWSCognitoUserPoolConfig_withSmsConfiguration(name), Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "sms_configuration.#", "1"), - resource.TestCheckResourceAttrSet("aws_cognito_user_pool.pool", "sms_configuration.0.external_id"), - resource.TestCheckResourceAttrSet("aws_cognito_user_pool.pool", "sms_configuration.0.sns_caller_arn"), + resource.TestCheckResourceAttr(resourceName, "sms_configuration.#", "1"), + resource.TestCheckResourceAttrSet(resourceName, "sms_configuration.0.external_id"), + resource.TestCheckResourceAttrSet(resourceName, "sms_configuration.0.sns_caller_arn"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -329,6 +356,7 @@ func TestAccAWSCognitoUserPool_withSmsConfiguration(t *testing.T) { // Ensure we can update a User Pool, handling IAM role propagation. 
func TestAccAWSCognitoUserPool_withSmsConfigurationUpdated(t *testing.T) { name := acctest.RandString(5) + resourceName := "aws_cognito_user_pool.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, @@ -338,16 +366,21 @@ func TestAccAWSCognitoUserPool_withSmsConfigurationUpdated(t *testing.T) { { Config: testAccAWSCognitoUserPoolConfig_basic(name), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSCognitoUserPoolExists("aws_cognito_user_pool.pool"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "sms_configuration.#", "0"), + testAccCheckAWSCognitoUserPoolExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "sms_configuration.#", "0"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSCognitoUserPoolConfig_withSmsConfiguration(name), Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "sms_configuration.#", "1"), - resource.TestCheckResourceAttrSet("aws_cognito_user_pool.pool", "sms_configuration.0.external_id"), - resource.TestCheckResourceAttrSet("aws_cognito_user_pool.pool", "sms_configuration.0.sns_caller_arn"), + resource.TestCheckResourceAttr(resourceName, "sms_configuration.#", "1"), + resource.TestCheckResourceAttrSet(resourceName, "sms_configuration.0.external_id"), + resource.TestCheckResourceAttrSet(resourceName, "sms_configuration.0.sns_caller_arn"), ), }, }, @@ -356,6 +389,7 @@ func TestAccAWSCognitoUserPool_withSmsConfigurationUpdated(t *testing.T) { func TestAccAWSCognitoUserPool_withTags(t *testing.T) { name := acctest.RandString(5) + resourceName := "aws_cognito_user_pool.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, @@ -365,15 +399,20 @@ func TestAccAWSCognitoUserPool_withTags(t *testing.T) { { Config: testAccAWSCognitoUserPoolConfig_withTags(name), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSCognitoUserPoolExists("aws_cognito_user_pool.pool"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "tags.Name", "Foo"), + testAccCheckAWSCognitoUserPoolExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "tags.Name", "Foo"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSCognitoUserPoolConfig_withTagsUpdated(name), Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "tags.Name", "FooBar"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "tags.Project", "Terraform"), + resource.TestCheckResourceAttr(resourceName, "tags.Name", "FooBar"), + resource.TestCheckResourceAttr(resourceName, "tags.Project", "Terraform"), ), }, }, @@ -382,6 +421,7 @@ func TestAccAWSCognitoUserPool_withTags(t *testing.T) { func TestAccAWSCognitoUserPool_withAliasAttributes(t *testing.T) { name := acctest.RandString(5) + resourceName := "aws_cognito_user_pool.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, @@ -391,20 +431,25 @@ func TestAccAWSCognitoUserPool_withAliasAttributes(t *testing.T) { { Config: testAccAWSCognitoUserPoolConfig_withAliasAttributes(name), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSCognitoUserPoolExists("aws_cognito_user_pool.pool"), - 
resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "alias_attributes.#", "1"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "alias_attributes.1888159429", "preferred_username"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "auto_verified_attributes.#", "0"), + testAccCheckAWSCognitoUserPoolExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "alias_attributes.#", "1"), + resource.TestCheckResourceAttr(resourceName, "alias_attributes.1888159429", "preferred_username"), + resource.TestCheckResourceAttr(resourceName, "auto_verified_attributes.#", "0"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSCognitoUserPoolConfig_withAliasAttributesUpdated(name), Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "alias_attributes.#", "2"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "alias_attributes.881205744", "email"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "alias_attributes.1888159429", "preferred_username"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "auto_verified_attributes.#", "1"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "auto_verified_attributes.881205744", "email"), + resource.TestCheckResourceAttr(resourceName, "alias_attributes.#", "2"), + resource.TestCheckResourceAttr(resourceName, "alias_attributes.881205744", "email"), + resource.TestCheckResourceAttr(resourceName, "alias_attributes.1888159429", "preferred_username"), + resource.TestCheckResourceAttr(resourceName, "auto_verified_attributes.#", "1"), + resource.TestCheckResourceAttr(resourceName, "auto_verified_attributes.881205744", "email"), ), }, }, @@ -413,6 +458,7 @@ func TestAccAWSCognitoUserPool_withAliasAttributes(t *testing.T) { func TestAccAWSCognitoUserPool_withPasswordPolicy(t *testing.T) { name := acctest.RandString(5) + resourceName := "aws_cognito_user_pool.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, @@ -422,24 +468,29 @@ func TestAccAWSCognitoUserPool_withPasswordPolicy(t *testing.T) { { Config: testAccAWSCognitoUserPoolConfig_withPasswordPolicy(name), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSCognitoUserPoolExists("aws_cognito_user_pool.pool"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "password_policy.#", "1"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "password_policy.0.minimum_length", "7"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "password_policy.0.require_lowercase", "true"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "password_policy.0.require_numbers", "false"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "password_policy.0.require_symbols", "true"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "password_policy.0.require_uppercase", "false"), + testAccCheckAWSCognitoUserPoolExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "password_policy.#", "1"), + resource.TestCheckResourceAttr(resourceName, "password_policy.0.minimum_length", "7"), + resource.TestCheckResourceAttr(resourceName, "password_policy.0.require_lowercase", "true"), + resource.TestCheckResourceAttr(resourceName, "password_policy.0.require_numbers", "false"), + resource.TestCheckResourceAttr(resourceName, 
"password_policy.0.require_symbols", "true"), + resource.TestCheckResourceAttr(resourceName, "password_policy.0.require_uppercase", "false"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSCognitoUserPoolConfig_withPasswordPolicyUpdated(name), Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "password_policy.#", "1"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "password_policy.0.minimum_length", "9"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "password_policy.0.require_lowercase", "false"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "password_policy.0.require_numbers", "true"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "password_policy.0.require_symbols", "false"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "password_policy.0.require_uppercase", "true"), + resource.TestCheckResourceAttr(resourceName, "password_policy.#", "1"), + resource.TestCheckResourceAttr(resourceName, "password_policy.0.minimum_length", "9"), + resource.TestCheckResourceAttr(resourceName, "password_policy.0.require_lowercase", "false"), + resource.TestCheckResourceAttr(resourceName, "password_policy.0.require_numbers", "true"), + resource.TestCheckResourceAttr(resourceName, "password_policy.0.require_symbols", "false"), + resource.TestCheckResourceAttr(resourceName, "password_policy.0.require_uppercase", "true"), ), }, }, @@ -448,6 +499,7 @@ func TestAccAWSCognitoUserPool_withPasswordPolicy(t *testing.T) { func TestAccAWSCognitoUserPool_withLambdaConfig(t *testing.T) { name := acctest.RandString(5) + resourceName := "aws_cognito_user_pool.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, @@ -457,34 +509,39 @@ func TestAccAWSCognitoUserPool_withLambdaConfig(t *testing.T) { { Config: testAccAWSCognitoUserPoolConfig_withLambdaConfig(name), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSCognitoUserPoolExists("aws_cognito_user_pool.main"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.main", "lambda_config.#", "1"), - resource.TestCheckResourceAttrSet("aws_cognito_user_pool.main", "lambda_config.0.create_auth_challenge"), - resource.TestCheckResourceAttrSet("aws_cognito_user_pool.main", "lambda_config.0.custom_message"), - resource.TestCheckResourceAttrSet("aws_cognito_user_pool.main", "lambda_config.0.define_auth_challenge"), - resource.TestCheckResourceAttrSet("aws_cognito_user_pool.main", "lambda_config.0.post_authentication"), - resource.TestCheckResourceAttrSet("aws_cognito_user_pool.main", "lambda_config.0.post_confirmation"), - resource.TestCheckResourceAttrSet("aws_cognito_user_pool.main", "lambda_config.0.pre_authentication"), - resource.TestCheckResourceAttrSet("aws_cognito_user_pool.main", "lambda_config.0.pre_sign_up"), - resource.TestCheckResourceAttrSet("aws_cognito_user_pool.main", "lambda_config.0.pre_token_generation"), - resource.TestCheckResourceAttrSet("aws_cognito_user_pool.main", "lambda_config.0.user_migration"), - resource.TestCheckResourceAttrSet("aws_cognito_user_pool.main", "lambda_config.0.verify_auth_challenge_response"), + testAccCheckAWSCognitoUserPoolExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "lambda_config.#", "1"), + resource.TestCheckResourceAttrSet(resourceName, "lambda_config.0.create_auth_challenge"), + 
resource.TestCheckResourceAttrSet(resourceName, "lambda_config.0.custom_message"), + resource.TestCheckResourceAttrSet(resourceName, "lambda_config.0.define_auth_challenge"), + resource.TestCheckResourceAttrSet(resourceName, "lambda_config.0.post_authentication"), + resource.TestCheckResourceAttrSet(resourceName, "lambda_config.0.post_confirmation"), + resource.TestCheckResourceAttrSet(resourceName, "lambda_config.0.pre_authentication"), + resource.TestCheckResourceAttrSet(resourceName, "lambda_config.0.pre_sign_up"), + resource.TestCheckResourceAttrSet(resourceName, "lambda_config.0.pre_token_generation"), + resource.TestCheckResourceAttrSet(resourceName, "lambda_config.0.user_migration"), + resource.TestCheckResourceAttrSet(resourceName, "lambda_config.0.verify_auth_challenge_response"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSCognitoUserPoolConfig_withLambdaConfigUpdated(name), Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr("aws_cognito_user_pool.main", "lambda_config.#", "1"), - resource.TestCheckResourceAttrSet("aws_cognito_user_pool.main", "lambda_config.0.create_auth_challenge"), - resource.TestCheckResourceAttrSet("aws_cognito_user_pool.main", "lambda_config.0.custom_message"), - resource.TestCheckResourceAttrSet("aws_cognito_user_pool.main", "lambda_config.0.define_auth_challenge"), - resource.TestCheckResourceAttrSet("aws_cognito_user_pool.main", "lambda_config.0.post_authentication"), - resource.TestCheckResourceAttrSet("aws_cognito_user_pool.main", "lambda_config.0.post_confirmation"), - resource.TestCheckResourceAttrSet("aws_cognito_user_pool.main", "lambda_config.0.pre_authentication"), - resource.TestCheckResourceAttrSet("aws_cognito_user_pool.main", "lambda_config.0.pre_sign_up"), - resource.TestCheckResourceAttrSet("aws_cognito_user_pool.main", "lambda_config.0.pre_token_generation"), - resource.TestCheckResourceAttrSet("aws_cognito_user_pool.main", "lambda_config.0.user_migration"), - resource.TestCheckResourceAttrSet("aws_cognito_user_pool.main", "lambda_config.0.verify_auth_challenge_response"), + resource.TestCheckResourceAttr(resourceName, "lambda_config.#", "1"), + resource.TestCheckResourceAttrSet(resourceName, "lambda_config.0.create_auth_challenge"), + resource.TestCheckResourceAttrSet(resourceName, "lambda_config.0.custom_message"), + resource.TestCheckResourceAttrSet(resourceName, "lambda_config.0.define_auth_challenge"), + resource.TestCheckResourceAttrSet(resourceName, "lambda_config.0.post_authentication"), + resource.TestCheckResourceAttrSet(resourceName, "lambda_config.0.post_confirmation"), + resource.TestCheckResourceAttrSet(resourceName, "lambda_config.0.pre_authentication"), + resource.TestCheckResourceAttrSet(resourceName, "lambda_config.0.pre_sign_up"), + resource.TestCheckResourceAttrSet(resourceName, "lambda_config.0.pre_token_generation"), + resource.TestCheckResourceAttrSet(resourceName, "lambda_config.0.user_migration"), + resource.TestCheckResourceAttrSet(resourceName, "lambda_config.0.verify_auth_challenge_response"), ), }, }, @@ -493,6 +550,7 @@ func TestAccAWSCognitoUserPool_withLambdaConfig(t *testing.T) { func TestAccAWSCognitoUserPool_withSchemaAttributes(t *testing.T) { name := acctest.RandString(5) + resourceName := "aws_cognito_user_pool.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, @@ -502,61 +560,66 @@ func 
TestAccAWSCognitoUserPool_withSchemaAttributes(t *testing.T) { { Config: testAccAWSCognitoUserPoolConfig_withSchemaAttributes(name), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSCognitoUserPoolExists("aws_cognito_user_pool.main"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.main", "schema.#", "2"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.main", "schema.145451252.attribute_data_type", "String"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.main", "schema.145451252.developer_only_attribute", "false"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.main", "schema.145451252.mutable", "false"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.main", "schema.145451252.name", "email"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.main", "schema.145451252.number_attribute_constraints.#", "0"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.main", "schema.145451252.required", "true"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.main", "schema.145451252.string_attribute_constraints.#", "1"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.main", "schema.145451252.string_attribute_constraints.0.min_length", "5"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.main", "schema.145451252.string_attribute_constraints.0.max_length", "10"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.main", "schema.770828826.attribute_data_type", "Boolean"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.main", "schema.770828826.developer_only_attribute", "true"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.main", "schema.770828826.mutable", "false"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.main", "schema.770828826.name", "mybool"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.main", "schema.770828826.number_attribute_constraints.#", "0"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.main", "schema.770828826.required", "false"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.main", "schema.770828826.string_attribute_constraints.#", "0"), + testAccCheckAWSCognitoUserPoolExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "schema.#", "2"), + resource.TestCheckResourceAttr(resourceName, "schema.145451252.attribute_data_type", "String"), + resource.TestCheckResourceAttr(resourceName, "schema.145451252.developer_only_attribute", "false"), + resource.TestCheckResourceAttr(resourceName, "schema.145451252.mutable", "false"), + resource.TestCheckResourceAttr(resourceName, "schema.145451252.name", "email"), + resource.TestCheckResourceAttr(resourceName, "schema.145451252.number_attribute_constraints.#", "0"), + resource.TestCheckResourceAttr(resourceName, "schema.145451252.required", "true"), + resource.TestCheckResourceAttr(resourceName, "schema.145451252.string_attribute_constraints.#", "1"), + resource.TestCheckResourceAttr(resourceName, "schema.145451252.string_attribute_constraints.0.min_length", "5"), + resource.TestCheckResourceAttr(resourceName, "schema.145451252.string_attribute_constraints.0.max_length", "10"), + resource.TestCheckResourceAttr(resourceName, "schema.770828826.attribute_data_type", "Boolean"), + resource.TestCheckResourceAttr(resourceName, "schema.770828826.developer_only_attribute", "true"), + resource.TestCheckResourceAttr(resourceName, "schema.770828826.mutable", "false"), + resource.TestCheckResourceAttr(resourceName, "schema.770828826.name", "mybool"), + 
resource.TestCheckResourceAttr(resourceName, "schema.770828826.number_attribute_constraints.#", "0"), + resource.TestCheckResourceAttr(resourceName, "schema.770828826.required", "false"), + resource.TestCheckResourceAttr(resourceName, "schema.770828826.string_attribute_constraints.#", "0"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSCognitoUserPoolConfig_withSchemaAttributesUpdated(name), Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr("aws_cognito_user_pool.main", "schema.#", "3"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.main", "schema.2078884933.attribute_data_type", "String"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.main", "schema.2078884933.developer_only_attribute", "false"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.main", "schema.2078884933.mutable", "false"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.main", "schema.2078884933.name", "email"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.main", "schema.2078884933.number_attribute_constraints.#", "0"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.main", "schema.2078884933.required", "true"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.main", "schema.2078884933.string_attribute_constraints.#", "1"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.main", "schema.2078884933.string_attribute_constraints.0.min_length", "7"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.main", "schema.2078884933.string_attribute_constraints.0.max_length", "15"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.main", "schema.2718111653.attribute_data_type", "Number"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.main", "schema.2718111653.developer_only_attribute", "true"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.main", "schema.2718111653.mutable", "true"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.main", "schema.2718111653.name", "mynumber"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.main", "schema.2718111653.number_attribute_constraints.#", "1"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.main", "schema.2718111653.number_attribute_constraints.0.min_value", "2"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.main", "schema.2718111653.number_attribute_constraints.0.max_value", "6"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.main", "schema.2718111653.required", "false"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.main", "schema.2718111653.string_attribute_constraints.#", "0"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.main", "schema.2753746449.attribute_data_type", "Number"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.main", "schema.2753746449.developer_only_attribute", "false"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.main", "schema.2753746449.mutable", "true"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.main", "schema.2753746449.name", "mynondevnumber"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.main", "schema.2753746449.number_attribute_constraints.#", "1"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.main", "schema.2753746449.number_attribute_constraints.0.min_value", "2"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.main", "schema.2753746449.number_attribute_constraints.0.max_value", "6"), - 
resource.TestCheckResourceAttr("aws_cognito_user_pool.main", "schema.2753746449.required", "false"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.main", "schema.2753746449.string_attribute_constraints.#", "0"), + resource.TestCheckResourceAttr(resourceName, "schema.#", "3"), + resource.TestCheckResourceAttr(resourceName, "schema.2078884933.attribute_data_type", "String"), + resource.TestCheckResourceAttr(resourceName, "schema.2078884933.developer_only_attribute", "false"), + resource.TestCheckResourceAttr(resourceName, "schema.2078884933.mutable", "false"), + resource.TestCheckResourceAttr(resourceName, "schema.2078884933.name", "email"), + resource.TestCheckResourceAttr(resourceName, "schema.2078884933.number_attribute_constraints.#", "0"), + resource.TestCheckResourceAttr(resourceName, "schema.2078884933.required", "true"), + resource.TestCheckResourceAttr(resourceName, "schema.2078884933.string_attribute_constraints.#", "1"), + resource.TestCheckResourceAttr(resourceName, "schema.2078884933.string_attribute_constraints.0.min_length", "7"), + resource.TestCheckResourceAttr(resourceName, "schema.2078884933.string_attribute_constraints.0.max_length", "15"), + resource.TestCheckResourceAttr(resourceName, "schema.2718111653.attribute_data_type", "Number"), + resource.TestCheckResourceAttr(resourceName, "schema.2718111653.developer_only_attribute", "true"), + resource.TestCheckResourceAttr(resourceName, "schema.2718111653.mutable", "true"), + resource.TestCheckResourceAttr(resourceName, "schema.2718111653.name", "mynumber"), + resource.TestCheckResourceAttr(resourceName, "schema.2718111653.number_attribute_constraints.#", "1"), + resource.TestCheckResourceAttr(resourceName, "schema.2718111653.number_attribute_constraints.0.min_value", "2"), + resource.TestCheckResourceAttr(resourceName, "schema.2718111653.number_attribute_constraints.0.max_value", "6"), + resource.TestCheckResourceAttr(resourceName, "schema.2718111653.required", "false"), + resource.TestCheckResourceAttr(resourceName, "schema.2718111653.string_attribute_constraints.#", "0"), + resource.TestCheckResourceAttr(resourceName, "schema.2753746449.attribute_data_type", "Number"), + resource.TestCheckResourceAttr(resourceName, "schema.2753746449.developer_only_attribute", "false"), + resource.TestCheckResourceAttr(resourceName, "schema.2753746449.mutable", "true"), + resource.TestCheckResourceAttr(resourceName, "schema.2753746449.name", "mynondevnumber"), + resource.TestCheckResourceAttr(resourceName, "schema.2753746449.number_attribute_constraints.#", "1"), + resource.TestCheckResourceAttr(resourceName, "schema.2753746449.number_attribute_constraints.0.min_value", "2"), + resource.TestCheckResourceAttr(resourceName, "schema.2753746449.number_attribute_constraints.0.max_value", "6"), + resource.TestCheckResourceAttr(resourceName, "schema.2753746449.required", "false"), + resource.TestCheckResourceAttr(resourceName, "schema.2753746449.string_attribute_constraints.#", "0"), ), }, { - ResourceName: "aws_cognito_user_pool.main", + ResourceName: resourceName, ImportState: true, ImportStateVerify: true, }, @@ -566,6 +629,7 @@ func TestAccAWSCognitoUserPool_withSchemaAttributes(t *testing.T) { func TestAccAWSCognitoUserPool_withVerificationMessageTemplate(t *testing.T) { name := acctest.RandString(5) + resourceName := "aws_cognito_user_pool.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, @@ -575,36 +639,41 @@ func 
TestAccAWSCognitoUserPool_withVerificationMessageTemplate(t *testing.T) { { Config: testAccAWSCognitoUserPoolConfig_withVerificationMessageTemplate(name), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSCognitoUserPoolExists("aws_cognito_user_pool.pool"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "verification_message_template.0.default_email_option", "CONFIRM_WITH_LINK"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "verification_message_template.0.email_message", "foo {####} bar"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "verification_message_template.0.email_message_by_link", "{##foobar##}"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "verification_message_template.0.email_subject", "foobar {####}"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "verification_message_template.0.email_subject_by_link", "foobar"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "verification_message_template.0.sms_message", "{####} baz"), + testAccCheckAWSCognitoUserPoolExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "verification_message_template.0.default_email_option", "CONFIRM_WITH_LINK"), + resource.TestCheckResourceAttr(resourceName, "verification_message_template.0.email_message", "foo {####} bar"), + resource.TestCheckResourceAttr(resourceName, "verification_message_template.0.email_message_by_link", "{##foobar##}"), + resource.TestCheckResourceAttr(resourceName, "verification_message_template.0.email_subject", "foobar {####}"), + resource.TestCheckResourceAttr(resourceName, "verification_message_template.0.email_subject_by_link", "foobar"), + resource.TestCheckResourceAttr(resourceName, "verification_message_template.0.sms_message", "{####} baz"), /* Setting Verification template attributes like EmailMessage, EmailSubject or SmsMessage will implicitly set EmailVerificationMessage, EmailVerificationSubject and SmsVerificationMessage attributes. 
*/ - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "email_verification_message", "foo {####} bar"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "email_verification_subject", "foobar {####}"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "sms_verification_message", "{####} baz"), + resource.TestCheckResourceAttr(resourceName, "email_verification_message", "foo {####} bar"), + resource.TestCheckResourceAttr(resourceName, "email_verification_subject", "foobar {####}"), + resource.TestCheckResourceAttr(resourceName, "sms_verification_message", "{####} baz"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSCognitoUserPoolConfig_withVerificationMessageTemplate_DefaultEmailOption(name), Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "verification_message_template.0.default_email_option", "CONFIRM_WITH_CODE"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "email_verification_message", "{####} Baz"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "email_verification_subject", "BazBaz {####}"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "sms_verification_message", "{####} BazBazBar?"), + resource.TestCheckResourceAttr(resourceName, "verification_message_template.0.default_email_option", "CONFIRM_WITH_CODE"), + resource.TestCheckResourceAttr(resourceName, "email_verification_message", "{####} Baz"), + resource.TestCheckResourceAttr(resourceName, "email_verification_subject", "BazBaz {####}"), + resource.TestCheckResourceAttr(resourceName, "sms_verification_message", "{####} BazBazBar?"), /* Setting EmailVerificationMessage, EmailVerificationSubject and SmsVerificationMessage attributes will implicitly set verification template attributes like EmailMessage, EmailSubject or SmsMessage. 
*/ - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "verification_message_template.0.email_message", "{####} Baz"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "verification_message_template.0.email_subject", "BazBaz {####}"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "verification_message_template.0.sms_message", "{####} BazBazBar?"), + resource.TestCheckResourceAttr(resourceName, "verification_message_template.0.email_message", "{####} Baz"), + resource.TestCheckResourceAttr(resourceName, "verification_message_template.0.email_subject", "BazBaz {####}"), + resource.TestCheckResourceAttr(resourceName, "verification_message_template.0.sms_message", "{####} BazBazBar?"), ), }, }, @@ -617,6 +686,7 @@ func TestAccAWSCognitoUserPool_update(t *testing.T) { offMfa := "OFF" authenticationMessage := fmt.Sprintf("%s {####}", acctest.RandString(10)) updatedAuthenticationMessage := fmt.Sprintf("%s {####}", acctest.RandString(10)) + resourceName := "aws_cognito_user_pool.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCognitoIdentityProvider(t) }, @@ -626,73 +696,78 @@ func TestAccAWSCognitoUserPool_update(t *testing.T) { { Config: testAccAWSCognitoUserPoolConfig_update(name, optionalMfa, authenticationMessage), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSCognitoUserPoolExists("aws_cognito_user_pool.pool"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "auto_verified_attributes.#", "1"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "mfa_configuration", optionalMfa), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "email_verification_message", "Foo {####} Bar"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "email_verification_subject", "FooBar {####}"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "sms_verification_message", "{####} Baz"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "sms_authentication_message", authenticationMessage), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "admin_create_user_config.0.unused_account_validity_days", "6"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "admin_create_user_config.0.allow_admin_create_user_only", "true"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "admin_create_user_config.0.invite_message_template.0.email_message", "Your username is {username} and temporary password is {####}. 
"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "admin_create_user_config.0.invite_message_template.0.email_subject", "FooBar {####}"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "admin_create_user_config.0.invite_message_template.0.sms_message", "Your username is {username} and temporary password is {####}."), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "device_configuration.0.challenge_required_on_new_device", "true"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "device_configuration.0.device_only_remembered_on_user_prompt", "false"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "verification_message_template.0.default_email_option", "CONFIRM_WITH_CODE"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "sms_configuration.#", "1"), - resource.TestCheckResourceAttrSet("aws_cognito_user_pool.pool", "sms_configuration.0.external_id"), - resource.TestCheckResourceAttrSet("aws_cognito_user_pool.pool", "sms_configuration.0.sns_caller_arn"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "tags.Name", "Foo"), + testAccCheckAWSCognitoUserPoolExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "auto_verified_attributes.#", "1"), + resource.TestCheckResourceAttr(resourceName, "mfa_configuration", optionalMfa), + resource.TestCheckResourceAttr(resourceName, "email_verification_message", "Foo {####} Bar"), + resource.TestCheckResourceAttr(resourceName, "email_verification_subject", "FooBar {####}"), + resource.TestCheckResourceAttr(resourceName, "sms_verification_message", "{####} Baz"), + resource.TestCheckResourceAttr(resourceName, "sms_authentication_message", authenticationMessage), + resource.TestCheckResourceAttr(resourceName, "admin_create_user_config.0.unused_account_validity_days", "6"), + resource.TestCheckResourceAttr(resourceName, "admin_create_user_config.0.allow_admin_create_user_only", "true"), + resource.TestCheckResourceAttr(resourceName, "admin_create_user_config.0.invite_message_template.0.email_message", "Your username is {username} and temporary password is {####}. 
"), + resource.TestCheckResourceAttr(resourceName, "admin_create_user_config.0.invite_message_template.0.email_subject", "FooBar {####}"), + resource.TestCheckResourceAttr(resourceName, "admin_create_user_config.0.invite_message_template.0.sms_message", "Your username is {username} and temporary password is {####}."), + resource.TestCheckResourceAttr(resourceName, "device_configuration.0.challenge_required_on_new_device", "true"), + resource.TestCheckResourceAttr(resourceName, "device_configuration.0.device_only_remembered_on_user_prompt", "false"), + resource.TestCheckResourceAttr(resourceName, "verification_message_template.0.default_email_option", "CONFIRM_WITH_CODE"), + resource.TestCheckResourceAttr(resourceName, "sms_configuration.#", "1"), + resource.TestCheckResourceAttrSet(resourceName, "sms_configuration.0.external_id"), + resource.TestCheckResourceAttrSet(resourceName, "sms_configuration.0.sns_caller_arn"), + resource.TestCheckResourceAttr(resourceName, "tags.Name", "Foo"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSCognitoUserPoolConfig_update(name, optionalMfa, updatedAuthenticationMessage), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSCognitoUserPoolExists("aws_cognito_user_pool.pool"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "auto_verified_attributes.#", "1"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "mfa_configuration", optionalMfa), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "email_verification_message", "Foo {####} Bar"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "email_verification_subject", "FooBar {####}"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "sms_verification_message", "{####} Baz"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "sms_authentication_message", updatedAuthenticationMessage), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "admin_create_user_config.0.unused_account_validity_days", "6"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "admin_create_user_config.0.allow_admin_create_user_only", "true"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "admin_create_user_config.0.invite_message_template.0.email_message", "Your username is {username} and temporary password is {####}. 
"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "admin_create_user_config.0.invite_message_template.0.email_subject", "FooBar {####}"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "admin_create_user_config.0.invite_message_template.0.sms_message", "Your username is {username} and temporary password is {####}."), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "device_configuration.0.challenge_required_on_new_device", "true"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "device_configuration.0.device_only_remembered_on_user_prompt", "false"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "verification_message_template.0.default_email_option", "CONFIRM_WITH_CODE"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "sms_configuration.#", "1"), - resource.TestCheckResourceAttrSet("aws_cognito_user_pool.pool", "sms_configuration.0.external_id"), - resource.TestCheckResourceAttrSet("aws_cognito_user_pool.pool", "sms_configuration.0.sns_caller_arn"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "tags.Name", "Foo"), + testAccCheckAWSCognitoUserPoolExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "auto_verified_attributes.#", "1"), + resource.TestCheckResourceAttr(resourceName, "mfa_configuration", optionalMfa), + resource.TestCheckResourceAttr(resourceName, "email_verification_message", "Foo {####} Bar"), + resource.TestCheckResourceAttr(resourceName, "email_verification_subject", "FooBar {####}"), + resource.TestCheckResourceAttr(resourceName, "sms_verification_message", "{####} Baz"), + resource.TestCheckResourceAttr(resourceName, "sms_authentication_message", updatedAuthenticationMessage), + resource.TestCheckResourceAttr(resourceName, "admin_create_user_config.0.unused_account_validity_days", "6"), + resource.TestCheckResourceAttr(resourceName, "admin_create_user_config.0.allow_admin_create_user_only", "true"), + resource.TestCheckResourceAttr(resourceName, "admin_create_user_config.0.invite_message_template.0.email_message", "Your username is {username} and temporary password is {####}. 
"), + resource.TestCheckResourceAttr(resourceName, "admin_create_user_config.0.invite_message_template.0.email_subject", "FooBar {####}"), + resource.TestCheckResourceAttr(resourceName, "admin_create_user_config.0.invite_message_template.0.sms_message", "Your username is {username} and temporary password is {####}."), + resource.TestCheckResourceAttr(resourceName, "device_configuration.0.challenge_required_on_new_device", "true"), + resource.TestCheckResourceAttr(resourceName, "device_configuration.0.device_only_remembered_on_user_prompt", "false"), + resource.TestCheckResourceAttr(resourceName, "verification_message_template.0.default_email_option", "CONFIRM_WITH_CODE"), + resource.TestCheckResourceAttr(resourceName, "sms_configuration.#", "1"), + resource.TestCheckResourceAttrSet(resourceName, "sms_configuration.0.external_id"), + resource.TestCheckResourceAttrSet(resourceName, "sms_configuration.0.sns_caller_arn"), + resource.TestCheckResourceAttr(resourceName, "tags.Name", "Foo"), ), }, { Config: testAccAWSCognitoUserPoolConfig_update(name, offMfa, updatedAuthenticationMessage), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckAWSCognitoUserPoolExists("aws_cognito_user_pool.pool"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "auto_verified_attributes.#", "1"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "mfa_configuration", offMfa), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "email_verification_message", "Foo {####} Bar"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "email_verification_subject", "FooBar {####}"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "sms_verification_message", "{####} Baz"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "sms_authentication_message", updatedAuthenticationMessage), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "admin_create_user_config.0.unused_account_validity_days", "6"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "admin_create_user_config.0.allow_admin_create_user_only", "true"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "admin_create_user_config.0.invite_message_template.0.email_message", "Your username is {username} and temporary password is {####}. 
"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "admin_create_user_config.0.invite_message_template.0.email_subject", "FooBar {####}"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "admin_create_user_config.0.invite_message_template.0.sms_message", "Your username is {username} and temporary password is {####}."), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "device_configuration.0.challenge_required_on_new_device", "true"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "device_configuration.0.device_only_remembered_on_user_prompt", "false"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "verification_message_template.0.default_email_option", "CONFIRM_WITH_CODE"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "sms_configuration.#", "1"), - resource.TestCheckResourceAttrSet("aws_cognito_user_pool.pool", "sms_configuration.0.external_id"), - resource.TestCheckResourceAttrSet("aws_cognito_user_pool.pool", "sms_configuration.0.sns_caller_arn"), - resource.TestCheckResourceAttr("aws_cognito_user_pool.pool", "tags.Name", "Foo"), + testAccCheckAWSCognitoUserPoolExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "auto_verified_attributes.#", "1"), + resource.TestCheckResourceAttr(resourceName, "mfa_configuration", offMfa), + resource.TestCheckResourceAttr(resourceName, "email_verification_message", "Foo {####} Bar"), + resource.TestCheckResourceAttr(resourceName, "email_verification_subject", "FooBar {####}"), + resource.TestCheckResourceAttr(resourceName, "sms_verification_message", "{####} Baz"), + resource.TestCheckResourceAttr(resourceName, "sms_authentication_message", updatedAuthenticationMessage), + resource.TestCheckResourceAttr(resourceName, "admin_create_user_config.0.unused_account_validity_days", "6"), + resource.TestCheckResourceAttr(resourceName, "admin_create_user_config.0.allow_admin_create_user_only", "true"), + resource.TestCheckResourceAttr(resourceName, "admin_create_user_config.0.invite_message_template.0.email_message", "Your username is {username} and temporary password is {####}. 
"), + resource.TestCheckResourceAttr(resourceName, "admin_create_user_config.0.invite_message_template.0.email_subject", "FooBar {####}"), + resource.TestCheckResourceAttr(resourceName, "admin_create_user_config.0.invite_message_template.0.sms_message", "Your username is {username} and temporary password is {####}."), + resource.TestCheckResourceAttr(resourceName, "device_configuration.0.challenge_required_on_new_device", "true"), + resource.TestCheckResourceAttr(resourceName, "device_configuration.0.device_only_remembered_on_user_prompt", "false"), + resource.TestCheckResourceAttr(resourceName, "verification_message_template.0.default_email_option", "CONFIRM_WITH_CODE"), + resource.TestCheckResourceAttr(resourceName, "sms_configuration.#", "1"), + resource.TestCheckResourceAttrSet(resourceName, "sms_configuration.0.external_id"), + resource.TestCheckResourceAttrSet(resourceName, "sms_configuration.0.sns_caller_arn"), + resource.TestCheckResourceAttr(resourceName, "tags.Name", "Foo"), ), }, }, @@ -767,7 +842,7 @@ func testAccPreCheckAWSCognitoIdentityProvider(t *testing.T) { func testAccAWSCognitoUserPoolConfig_basic(name string) string { return fmt.Sprintf(` -resource "aws_cognito_user_pool" "pool" { +resource "aws_cognito_user_pool" "test" { name = "terraform-test-pool-%s" } `, name) @@ -775,7 +850,7 @@ resource "aws_cognito_user_pool" "pool" { func testAccAWSCognitoUserPoolConfig_withAdminCreateUserConfiguration(name string) string { return fmt.Sprintf(` -resource "aws_cognito_user_pool" "pool" { +resource "aws_cognito_user_pool" "test" { name = "terraform-test-pool-%s" admin_create_user_config { @@ -794,7 +869,7 @@ resource "aws_cognito_user_pool" "pool" { func testAccAWSCognitoUserPoolConfig_withAdminCreateUserConfigurationUpdated(name string) string { return fmt.Sprintf(` -resource "aws_cognito_user_pool" "pool" { +resource "aws_cognito_user_pool" "test" { name = "terraform-test-pool-%s" admin_create_user_config { @@ -813,7 +888,7 @@ resource "aws_cognito_user_pool" "pool" { func testAccAWSCognitoUserPoolConfig_withAdvancedSecurityMode(name string, mode string) string { return fmt.Sprintf(` -resource "aws_cognito_user_pool" "pool" { +resource "aws_cognito_user_pool" "test" { name = "terraform-test-pool-%s" user_pool_add_ons { @@ -825,7 +900,7 @@ resource "aws_cognito_user_pool" "pool" { func testAccAWSCognitoUserPoolConfig_withDeviceConfiguration(name string) string { return fmt.Sprintf(` -resource "aws_cognito_user_pool" "pool" { +resource "aws_cognito_user_pool" "test" { name = "terraform-test-pool-%s" device_configuration { @@ -838,7 +913,7 @@ resource "aws_cognito_user_pool" "pool" { func testAccAWSCognitoUserPoolConfig_withDeviceConfigurationUpdated(name string) string { return fmt.Sprintf(` -resource "aws_cognito_user_pool" "pool" { +resource "aws_cognito_user_pool" "test" { name = "terraform-test-pool-%s" device_configuration { @@ -851,7 +926,7 @@ resource "aws_cognito_user_pool" "pool" { func testAccAWSCognitoUserPoolConfig_withEmailVerificationMessage(name, subject, message string) string { return fmt.Sprintf(` -resource "aws_cognito_user_pool" "pool" { +resource "aws_cognito_user_pool" "test" { name = "terraform-test-pool-%s" email_verification_subject = "%s" email_verification_message = "%s" @@ -865,7 +940,7 @@ resource "aws_cognito_user_pool" "pool" { func testAccAWSCognitoUserPoolConfig_withSmsVerificationMessage(name, authenticationMessage, verificationMessage string) string { return fmt.Sprintf(` -resource "aws_cognito_user_pool" "pool" { +resource 
"aws_cognito_user_pool" "test" { name = "terraform-test-pool-%s" sms_authentication_message = "%s" sms_verification_message = "%s" @@ -875,7 +950,7 @@ resource "aws_cognito_user_pool" "pool" { func testAccAWSCognitoUserPoolConfig_withTags(name string) string { return fmt.Sprintf(` -resource "aws_cognito_user_pool" "pool" { +resource "aws_cognito_user_pool" "test" { name = "terraform-test-pool-%s" tags = { @@ -887,7 +962,7 @@ resource "aws_cognito_user_pool" "pool" { func testAccAWSCognitoUserPoolConfig_withEmailConfiguration(name, email, arn, account string) string { return fmt.Sprintf(` -resource "aws_cognito_user_pool" "pool" { +resource "aws_cognito_user_pool" "test" { name = "terraform-test-pool-%[1]s" @@ -903,7 +978,7 @@ func testAccAWSCognitoUserPoolConfig_withSmsConfiguration(name string) string { return fmt.Sprintf(` data "aws_caller_identity" "current" {} -resource "aws_iam_role" "main" { +resource "aws_iam_role" "test" { name = "test-role-%[1]s" path = "/service-role/" @@ -929,9 +1004,9 @@ resource "aws_iam_role" "main" { POLICY } -resource "aws_iam_role_policy" "main" { +resource "aws_iam_role_policy" "test" { name = "test-role-policy-%[1]s" - role = "${aws_iam_role.main.id}" + role = "${aws_iam_role.test.id}" policy = < Date: Tue, 22 Oct 2019 15:13:53 +0200 Subject: [PATCH 16/55] Import test refactor for codepipeline --- aws/resource_aws_codepipeline_test.go | 108 +++++++++++++------------- 1 file changed, 53 insertions(+), 55 deletions(-) diff --git a/aws/resource_aws_codepipeline_test.go b/aws/resource_aws_codepipeline_test.go index c6e0f5bd70c..22bb36b486a 100644 --- a/aws/resource_aws_codepipeline_test.go +++ b/aws/resource_aws_codepipeline_test.go @@ -13,37 +13,13 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/terraform" ) -func TestAccAWSCodePipeline_Import_basic(t *testing.T) { - if os.Getenv("GITHUB_TOKEN") == "" { - t.Skip("Environment variable GITHUB_TOKEN is not set") - } - - name := acctest.RandString(10) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCodePipeline(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCodePipelineDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSCodePipelineConfig_basic(name), - }, - - { - ResourceName: "aws_codepipeline.bar", - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - func TestAccAWSCodePipeline_basic(t *testing.T) { if os.Getenv("GITHUB_TOKEN") == "" { t.Skip("Environment variable GITHUB_TOKEN is not set") } name := acctest.RandString(10) + resourceName := "aws_codepipeline.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCodePipeline(t) }, @@ -53,21 +29,26 @@ func TestAccAWSCodePipeline_basic(t *testing.T) { { Config: testAccAWSCodePipelineConfig_basic(name), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSCodePipelineExists("aws_codepipeline.bar"), - resource.TestMatchResourceAttr("aws_codepipeline.bar", "arn", + testAccCheckAWSCodePipelineExists(resourceName), + resource.TestMatchResourceAttr(resourceName, "arn", regexp.MustCompile(fmt.Sprintf("^arn:aws:codepipeline:[^:]+:[0-9]{12}:test-pipeline-%s", name))), - resource.TestCheckResourceAttr("aws_codepipeline.bar", "artifact_store.0.type", "S3"), - resource.TestCheckResourceAttr("aws_codepipeline.bar", "artifact_store.0.encryption_key.0.id", "1234"), - resource.TestCheckResourceAttr("aws_codepipeline.bar", "artifact_store.0.encryption_key.0.type", "KMS"), + 
resource.TestCheckResourceAttr(resourceName, "artifact_store.0.type", "S3"), + resource.TestCheckResourceAttr(resourceName, "artifact_store.0.encryption_key.0.id", "1234"), + resource.TestCheckResourceAttr(resourceName, "artifact_store.0.encryption_key.0.type", "KMS"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSCodePipelineConfig_basicUpdated(name), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSCodePipelineExists("aws_codepipeline.bar"), - resource.TestCheckResourceAttr("aws_codepipeline.bar", "artifact_store.0.type", "S3"), - resource.TestCheckResourceAttr("aws_codepipeline.bar", "artifact_store.0.encryption_key.0.id", "4567"), - resource.TestCheckResourceAttr("aws_codepipeline.bar", "artifact_store.0.encryption_key.0.type", "KMS"), + testAccCheckAWSCodePipelineExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "artifact_store.0.type", "S3"), + resource.TestCheckResourceAttr(resourceName, "artifact_store.0.encryption_key.0.id", "4567"), + resource.TestCheckResourceAttr(resourceName, "artifact_store.0.encryption_key.0.type", "KMS"), ), }, }, @@ -80,6 +61,7 @@ func TestAccAWSCodePipeline_emptyArtifacts(t *testing.T) { } name := acctest.RandString(10) + resourceName := "aws_codepipeline.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCodePipeline(t) }, @@ -89,21 +71,26 @@ func TestAccAWSCodePipeline_emptyArtifacts(t *testing.T) { { Config: testAccAWSCodePipelineConfig_emptyArtifacts(name), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSCodePipelineExists("aws_codepipeline.bar"), - resource.TestMatchResourceAttr("aws_codepipeline.bar", "arn", + testAccCheckAWSCodePipelineExists(resourceName), + resource.TestMatchResourceAttr(resourceName, "arn", regexp.MustCompile(fmt.Sprintf("^arn:aws:codepipeline:[^:]+:[0-9]{12}:test-pipeline-%s", name))), - resource.TestCheckResourceAttr("aws_codepipeline.bar", "artifact_store.0.type", "S3"), - resource.TestCheckResourceAttr("aws_codepipeline.bar", "stage.1.name", "Build"), - resource.TestCheckResourceAttr("aws_codepipeline.bar", "stage.1.action.#", "1"), - resource.TestCheckResourceAttr("aws_codepipeline.bar", "stage.1.action.0.name", "Build"), - resource.TestCheckResourceAttr("aws_codepipeline.bar", "stage.1.action.0.category", "Build"), - resource.TestCheckResourceAttr("aws_codepipeline.bar", "stage.1.action.0.owner", "AWS"), - resource.TestCheckResourceAttr("aws_codepipeline.bar", "stage.1.action.0.provider", "CodeBuild"), - resource.TestCheckResourceAttr("aws_codepipeline.bar", "stage.1.action.0.input_artifacts.#", "1"), - resource.TestCheckResourceAttr("aws_codepipeline.bar", "stage.1.action.0.output_artifacts.#", "0"), + resource.TestCheckResourceAttr(resourceName, "artifact_store.0.type", "S3"), + resource.TestCheckResourceAttr(resourceName, "stage.1.name", "Build"), + resource.TestCheckResourceAttr(resourceName, "stage.1.action.#", "1"), + resource.TestCheckResourceAttr(resourceName, "stage.1.action.0.name", "Build"), + resource.TestCheckResourceAttr(resourceName, "stage.1.action.0.category", "Build"), + resource.TestCheckResourceAttr(resourceName, "stage.1.action.0.owner", "AWS"), + resource.TestCheckResourceAttr(resourceName, "stage.1.action.0.provider", "CodeBuild"), + resource.TestCheckResourceAttr(resourceName, "stage.1.action.0.input_artifacts.#", "1"), + resource.TestCheckResourceAttr(resourceName, "stage.1.action.0.output_artifacts.#", "0"), ), ExpectNonEmptyPlan: true, }, + { + 
ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -114,6 +101,7 @@ func TestAccAWSCodePipeline_deployWithServiceRole(t *testing.T) { } name := acctest.RandString(10) + resourceName := "aws_codepipeline.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCodePipeline(t) }, @@ -123,14 +111,19 @@ func TestAccAWSCodePipeline_deployWithServiceRole(t *testing.T) { { Config: testAccAWSCodePipelineConfig_deployWithServiceRole(name), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSCodePipelineExists("aws_codepipeline.bar"), - resource.TestCheckResourceAttr("aws_codepipeline.bar", "stage.2.name", "Deploy"), - resource.TestCheckResourceAttr("aws_codepipeline.bar", "stage.2.action.0.category", "Deploy"), + testAccCheckAWSCodePipelineExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "stage.2.name", "Deploy"), + resource.TestCheckResourceAttr(resourceName, "stage.2.action.0.category", "Deploy"), resource.TestMatchResourceAttr( - "aws_codepipeline.bar", "stage.2.action.0.role_arn", + resourceName, "stage.2.action.0.role_arn", regexp.MustCompile("^arn:aws:iam::[0-9]{12}:role/codepipeline-action-role.*")), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -141,7 +134,7 @@ func TestAccAWSCodePipeline_tags(t *testing.T) { } name := acctest.RandString(10) - resourceName := "aws_codepipeline.bar" + resourceName := "aws_codepipeline.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCodePipeline(t) }, @@ -158,6 +151,11 @@ func TestAccAWSCodePipeline_tags(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "tags.tag2", "tag2value"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSCodePipelineConfigWithTags(name, "tag1valueUpdate", "tag2valueUpdate"), Check: resource.ComposeTestCheckFunc( @@ -301,7 +299,7 @@ resource "aws_iam_role_policy" "codepipeline_policy" { EOF } -resource "aws_codepipeline" "bar" { +resource "aws_codepipeline" "test" { name = "test-pipeline-%s" role_arn = "${aws_iam_role.codepipeline_role.arn}" @@ -413,7 +411,7 @@ resource "aws_iam_role_policy" "codepipeline_policy" { EOF } -resource "aws_codepipeline" "bar" { +resource "aws_codepipeline" "test" { name = "test-pipeline-%s" role_arn = "${aws_iam_role.codepipeline_role.arn}" @@ -525,7 +523,7 @@ resource "aws_iam_role_policy" "codepipeline_policy" { EOF } -resource "aws_codepipeline" "bar" { +resource "aws_codepipeline" "test" { name = "test-pipeline-%s" role_arn = "${aws_iam_role.codepipeline_role.arn}" @@ -686,7 +684,7 @@ resource "aws_iam_role_policy" "codepipeline_action_policy" { EOF } -resource "aws_codepipeline" "bar" { +resource "aws_codepipeline" "test" { name = "test-pipeline-%s" role_arn = "${aws_iam_role.codepipeline_role.arn}" @@ -820,7 +818,7 @@ resource "aws_iam_role_policy" "codepipeline_policy" { EOF } -resource "aws_codepipeline" "bar" { +resource "aws_codepipeline" "test" { name = "test-pipeline-%[1]s" role_arn = "${aws_iam_role.codepipeline_role.arn}" From afc71de270f8e63603814f90d0c3c53c55f0c567 Mon Sep 17 00:00:00 2001 From: Ryn Daniels Date: Wed, 23 Oct 2019 11:02:42 +0200 Subject: [PATCH 17/55] Import test refactor for codecommit --- ...resource_aws_codecommit_repository_test.go | 81 +++++++++++-------- 1 file changed, 47 insertions(+), 34 deletions(-) diff --git a/aws/resource_aws_codecommit_repository_test.go 
b/aws/resource_aws_codecommit_repository_test.go index 6a8791d1bf5..074eb90fd1c 100644 --- a/aws/resource_aws_codecommit_repository_test.go +++ b/aws/resource_aws_codecommit_repository_test.go @@ -12,9 +12,9 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/terraform" ) -func TestAccAWSCodeCommitRepository_importBasic(t *testing.T) { - resName := "aws_codecommit_repository.test" +func TestAccAWSCodeCommitRepository_basic(t *testing.T) { rInt := acctest.RandInt() + resourceName := "aws_codecommit_repository.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -23,9 +23,12 @@ func TestAccAWSCodeCommitRepository_importBasic(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccCodeCommitRepository_basic(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckCodeCommitRepositoryExists("aws_codecommit_repository.test"), + ), }, { - ResourceName: resName, + ResourceName: resourceName, ImportState: true, ImportStateVerify: true, }, @@ -33,25 +36,10 @@ func TestAccAWSCodeCommitRepository_importBasic(t *testing.T) { }) } -func TestAccAWSCodeCommitRepository_basic(t *testing.T) { - rInt := acctest.RandInt() - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckCodeCommitRepositoryDestroy, - Steps: []resource.TestStep{ - { - Config: testAccCodeCommitRepository_basic(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckCodeCommitRepositoryExists("aws_codecommit_repository.test"), - ), - }, - }, - }) -} - func TestAccAWSCodeCommitRepository_withChanges(t *testing.T) { rInt := acctest.RandInt() + resourceName := "aws_codecommit_repository.test" + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -65,6 +53,11 @@ func TestAccAWSCodeCommitRepository_withChanges(t *testing.T) { "aws_codecommit_repository.test", "description", "This is a test description"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccCodeCommitRepository_withChanges(rInt), Check: resource.ComposeTestCheckFunc( @@ -79,6 +72,8 @@ func TestAccAWSCodeCommitRepository_withChanges(t *testing.T) { func TestAccAWSCodeCommitRepository_create_default_branch(t *testing.T) { rInt := acctest.RandInt() + resourceName := "aws_codecommit_repository.test" + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -92,12 +87,20 @@ func TestAccAWSCodeCommitRepository_create_default_branch(t *testing.T) { "aws_codecommit_repository.test", "default_branch", "master"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"default_branch"}, + }, }, }) } func TestAccAWSCodeCommitRepository_create_and_update_default_branch(t *testing.T) { rInt := acctest.RandInt() + resourceName := "aws_codecommit_repository.test" + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -111,6 +114,11 @@ func TestAccAWSCodeCommitRepository_create_and_update_default_branch(t *testing. 
"aws_codecommit_repository.test", "default_branch"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccCodeCommitRepository_with_default_branch(rInt), Check: resource.ComposeTestCheckFunc( @@ -124,38 +132,43 @@ func TestAccAWSCodeCommitRepository_create_and_update_default_branch(t *testing. } func TestAccAWSCodeCommitRepository_tags(t *testing.T) { - rName := acctest.RandString(10) + resourceName := "aws_codecommit_repository.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_codecommit_repository.test_repository", + IDRefreshName: resourceName, Providers: testAccProviders, CheckDestroy: testAccCheckCodeCommitRepositoryDestroy, Steps: []resource.TestStep{ { Config: testAccAWSCodeCommitRepositoryConfigTags1(rName, "key1", "value1"), Check: resource.ComposeTestCheckFunc( - testAccCheckCodeCommitRepositoryExists("aws_codecommit_repository.test_repository"), - resource.TestCheckResourceAttr("aws_codecommit_repository.test_repository", "tags.%", "1"), - resource.TestCheckResourceAttr("aws_codecommit_repository.test_repository", "tags.key1", "value1"), + testAccCheckCodeCommitRepositoryExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSCodeCommitRepositoryConfigTags2(rName, "key1", "value1updated", "key2", "value2"), Check: resource.ComposeTestCheckFunc( - testAccCheckCodeCommitRepositoryExists("aws_codecommit_repository.test_repository"), - resource.TestCheckResourceAttr("aws_codecommit_repository.test_repository", "tags.%", "2"), - resource.TestCheckResourceAttr("aws_codecommit_repository.test_repository", "tags.key1", "value1updated"), - resource.TestCheckResourceAttr("aws_codecommit_repository.test_repository", "tags.key2", "value2"), + testAccCheckCodeCommitRepositoryExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), ), }, { Config: testAccAWSCodeCommitRepositoryConfigTags1(rName, "key2", "value2"), Check: resource.ComposeTestCheckFunc( - testAccCheckCodeCommitRepositoryExists("aws_codecommit_repository.test_repository"), - resource.TestCheckResourceAttr("aws_codecommit_repository.test_repository", "tags.%", "1"), - resource.TestCheckResourceAttr("aws_codecommit_repository.test_repository", "tags.key2", "value2"), + testAccCheckCodeCommitRepositoryExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), ), }, }, @@ -249,7 +262,7 @@ resource "aws_codecommit_repository" "test" { func testAccAWSCodeCommitRepositoryConfigTags1(r, tag1Key, tag1Value string) string { return fmt.Sprintf(` -resource "aws_codecommit_repository" "test_repository" { +resource "aws_codecommit_repository" "test" { repository_name = "terraform-test-%s" tags = { %q = %q @@ -259,7 +272,7 @@ resource "aws_codecommit_repository" "test_repository" { func testAccAWSCodeCommitRepositoryConfigTags2(r, tag1Key, tag1Value, tag2Key, tag2Value string) string { return fmt.Sprintf(` -resource "aws_codecommit_repository" "test_repository" { +resource "aws_codecommit_repository" "test" { repository_name = "terraform-test-%s" 
tags = { %q = %q From 82ce5a6ec90cfe9a99aa0cb00a5a022ecc190d70 Mon Sep 17 00:00:00 2001 From: Ryn Daniels Date: Wed, 23 Oct 2019 11:39:43 +0200 Subject: [PATCH 18/55] Import test refactor for codebuild project --- aws/resource_aws_codebuild_project_test.go | 136 +++++++++++++++++---- 1 file changed, 115 insertions(+), 21 deletions(-) diff --git a/aws/resource_aws_codebuild_project_test.go b/aws/resource_aws_codebuild_project_test.go index da19d2c9270..df0957f26ff 100644 --- a/aws/resource_aws_codebuild_project_test.go +++ b/aws/resource_aws_codebuild_project_test.go @@ -39,27 +39,6 @@ func testAccAWSCodeBuildGitHubSourceLocationFromEnv() string { return sourceLocation } -func TestAccAWSCodeBuildProject_importBasic(t *testing.T) { - resourceName := "aws_codebuild_project.test" - rName := acctest.RandomWithPrefix("tf-acc-test") - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCodeBuild(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCodeBuildProjectDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSCodeBuildProjectConfig_basic(rName), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - func TestAccAWSCodeBuildProject_basic(t *testing.T) { var project codebuild.Project rName := acctest.RandomWithPrefix("tf-acc-test") @@ -130,6 +109,11 @@ func TestAccAWSCodeBuildProject_BadgeEnabled(t *testing.T) { resource.TestMatchResourceAttr(resourceName, "badge_url", regexp.MustCompile(`\b(https?).*\b`)), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -151,6 +135,11 @@ func TestAccAWSCodeBuildProject_BuildTimeout(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "build_timeout", "120"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSCodeBuildProjectConfig_BuildTimeout(rName, 240), Check: resource.ComposeTestCheckFunc( @@ -184,6 +173,11 @@ func TestAccAWSCodeBuildProject_Cache(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "cache.0.type", "NO_CACHE"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSCodeBuildProjectConfig_basic(rName), Check: resource.ComposeTestCheckFunc( @@ -248,6 +242,11 @@ func TestAccAWSCodeBuildProject_Description(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "description", "description1"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSCodeBuildProjectConfig_Description(rName, "description2"), Check: resource.ComposeTestCheckFunc( @@ -276,6 +275,11 @@ func TestAccAWSCodeBuildProject_EncryptionKey(t *testing.T) { resource.TestMatchResourceAttr(resourceName, "encryption_key", regexp.MustCompile(`.+`)), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -348,6 +352,11 @@ func TestAccAWSCodeBuildProject_Environment_EnvironmentVariable_Type(t *testing. 
resource.TestCheckResourceAttr(resourceName, "environment.4178155002.environment_variable.1.type", "PLAINTEXT"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSCodeBuildProjectConfig_Environment_EnvironmentVariable_Type(rName, "PARAMETER_STORE"), Check: resource.ComposeTestCheckFunc( @@ -379,6 +388,11 @@ func TestAccAWSCodeBuildProject_Environment_Certificate(t *testing.T) { testAccCheckAWSCodeBuildProjectCertificate(&project, fmt.Sprintf("%s/%s", bName, oName)), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -495,6 +509,11 @@ func TestAccAWSCodeBuildProject_Source_Auth(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "source.3680505372.auth.2706882902.type", "OAUTH"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -516,6 +535,11 @@ func TestAccAWSCodeBuildProject_Source_GitCloneDepth(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "source.1181740906.git_clone_depth", "1"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSCodeBuildProjectConfig_Source_GitCloneDepth(rName, 2), Check: resource.ComposeTestCheckFunc( @@ -544,6 +568,11 @@ func TestAccAWSCodeBuildProject_Source_InsecureSSL(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "source.1976396802.insecure_ssl", "true"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSCodeBuildProjectConfig_Source_InsecureSSL(rName, false), Check: resource.ComposeTestCheckFunc( @@ -572,6 +601,11 @@ func TestAccAWSCodeBuildProject_Source_ReportBuildStatus_Bitbucket(t *testing.T) resource.TestCheckResourceAttr(resourceName, "source.2876219937.report_build_status", "true"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSCodeBuildProjectConfig_Source_ReportBuildStatus_Bitbucket(rName, false), Check: resource.ComposeTestCheckFunc( @@ -600,6 +634,11 @@ func TestAccAWSCodeBuildProject_Source_ReportBuildStatus_GitHub(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "source.4215890488.report_build_status", "true"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSCodeBuildProjectConfig_Source_ReportBuildStatus_GitHub(rName, false), Check: resource.ComposeTestCheckFunc( @@ -628,6 +667,11 @@ func TestAccAWSCodeBuildProject_Source_ReportBuildStatus_GitHubEnterprise(t *tes resource.TestCheckResourceAttr(resourceName, "source.2964899175.report_build_status", "true"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSCodeBuildProjectConfig_Source_ReportBuildStatus_GitHubEnterprise(rName, false), Check: resource.ComposeTestCheckFunc( @@ -656,6 +700,11 @@ func TestAccAWSCodeBuildProject_Source_Type_Bitbucket(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "source.3210444828.type", "BITBUCKET"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -677,6 +726,11 @@ func TestAccAWSCodeBuildProject_Source_Type_CodeCommit(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "source.3715340088.type", "CODECOMMIT"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -698,6 +752,11 @@ func 
TestAccAWSCodeBuildProject_Source_Type_CodePipeline(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "source.2280907000.type", "CODEPIPELINE"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -719,6 +778,11 @@ func TestAccAWSCodeBuildProject_Source_Type_GitHubEnterprise(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "source.553628638.type", "GITHUB_ENTERPRISE"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -739,6 +803,11 @@ func TestAccAWSCodeBuildProject_Source_Type_S3(t *testing.T) { testAccCheckAWSCodeBuildProjectExists(resourceName, &project), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -766,6 +835,11 @@ phases: resource.TestCheckResourceAttr(resourceName, "source.2726343112.type", "NO_SOURCE"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -815,6 +889,11 @@ func TestAccAWSCodeBuildProject_Tags(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "tags.tag2", "tag2value"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSCodeBuildProjectConfig_Tags(rName, "tag2", "tag2value-updated"), Check: resource.ComposeTestCheckFunc( @@ -848,6 +927,11 @@ func TestAccAWSCodeBuildProject_VpcConfig(t *testing.T) { resource.TestMatchResourceAttr(resourceName, "vpc_config.0.vpc_id", regexp.MustCompile(`^vpc-`)), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSCodeBuildProjectConfig_VpcConfig1(rName), Check: resource.ComposeTestCheckFunc( @@ -892,6 +976,11 @@ func TestAccAWSCodeBuildProject_WindowsContainer(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "environment.2306861956.type", "WINDOWS_CONTAINER"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -1626,6 +1715,11 @@ func TestAccAWSCodeBuildProject_SecondarySources_CodeCommit(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "secondary_sources.2644986630.source_identifier", "secondarySource2"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } From 2053742b5a95d624f5b5ca8bbf8f9bbe10e4dfad Mon Sep 17 00:00:00 2001 From: Ryn Daniels Date: Wed, 23 Oct 2019 13:54:04 +0200 Subject: [PATCH 19/55] Import test refactor for cloudwatch resources --- aws/resource_aws_cloudwatch_dashboard_test.go | 49 +++-- ...resource_aws_cloudwatch_event_rule_test.go | 108 +++++------ ..._cloudwatch_log_destination_policy_test.go | 28 +-- ...rce_aws_cloudwatch_log_destination_test.go | 28 +-- aws/resource_aws_cloudwatch_log_group_test.go | 172 +++++++++++------- ...aws_cloudwatch_log_resource_policy_test.go | 44 ++--- 6 files changed, 205 insertions(+), 224 deletions(-) diff --git a/aws/resource_aws_cloudwatch_dashboard_test.go b/aws/resource_aws_cloudwatch_dashboard_test.go index c5908d260d5..02f0f0e8cbf 100644 --- a/aws/resource_aws_cloudwatch_dashboard_test.go +++ b/aws/resource_aws_cloudwatch_dashboard_test.go @@ -14,8 +14,9 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/terraform" ) -func TestAccAWSCloudWatchDashboard_importBasic(t *testing.T) { - resourceName := "aws_cloudwatch_dashboard.foobar" +func TestAccAWSCloudWatchDashboard_basic(t *testing.T) { + var dashboard cloudwatch.GetDashboardOutput + resourceName := 
"aws_cloudwatch_dashboard.test" rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ @@ -25,6 +26,10 @@ func TestAccAWSCloudWatchDashboard_importBasic(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccAWSCloudWatchDashboardConfig(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckCloudWatchDashboardExists(resourceName, &dashboard), + resource.TestCheckResourceAttr(resourceName, "dashboard_name", testAccAWSCloudWatchDashboardName(rInt)), + ), }, { ResourceName: resourceName, @@ -35,9 +40,11 @@ func TestAccAWSCloudWatchDashboard_importBasic(t *testing.T) { }) } -func TestAccAWSCloudWatchDashboard_basic(t *testing.T) { +func TestAccAWSCloudWatchDashboard_update(t *testing.T) { var dashboard cloudwatch.GetDashboardOutput + resourceName := "aws_cloudwatch_dashboard.test" rInt := acctest.RandInt() + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -46,36 +53,22 @@ func TestAccAWSCloudWatchDashboard_basic(t *testing.T) { { Config: testAccAWSCloudWatchDashboardConfig(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckCloudWatchDashboardExists("aws_cloudwatch_dashboard.foobar", &dashboard), - resource.TestCheckResourceAttr("aws_cloudwatch_dashboard.foobar", "dashboard_name", testAccAWSCloudWatchDashboardName(rInt)), + testAccCheckCloudWatchDashboardExists(resourceName, &dashboard), + testAccCloudWatchCheckDashboardBodyIsExpected(resourceName, basicWidget), + resource.TestCheckResourceAttr(resourceName, "dashboard_name", testAccAWSCloudWatchDashboardName(rInt)), ), }, - }, - }) -} - -func TestAccAWSCloudWatchDashboard_update(t *testing.T) { - var dashboard cloudwatch.GetDashboardOutput - rInt := acctest.RandInt() - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCloudWatchDashboardDestroy, - Steps: []resource.TestStep{ { - Config: testAccAWSCloudWatchDashboardConfig(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudWatchDashboardExists("aws_cloudwatch_dashboard.foobar", &dashboard), - testAccCloudWatchCheckDashboardBodyIsExpected("aws_cloudwatch_dashboard.foobar", basicWidget), - resource.TestCheckResourceAttr("aws_cloudwatch_dashboard.foobar", "dashboard_name", testAccAWSCloudWatchDashboardName(rInt)), - ), + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, }, { Config: testAccAWSCloudWatchDashboardConfig_updateBody(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckCloudWatchDashboardExists("aws_cloudwatch_dashboard.foobar", &dashboard), - testAccCloudWatchCheckDashboardBodyIsExpected("aws_cloudwatch_dashboard.foobar", updatedWidget), - resource.TestCheckResourceAttr("aws_cloudwatch_dashboard.foobar", "dashboard_name", testAccAWSCloudWatchDashboardName(rInt)), + testAccCheckCloudWatchDashboardExists(resourceName, &dashboard), + testAccCloudWatchCheckDashboardBodyIsExpected(resourceName, updatedWidget), + resource.TestCheckResourceAttr(resourceName, "dashboard_name", testAccAWSCloudWatchDashboardName(rInt)), ), }, }, @@ -163,7 +156,7 @@ func testAccAWSCloudWatchDashboardName(rInt int) string { func testAccAWSCloudWatchDashboardConfig(rInt int) string { return fmt.Sprintf(` -resource "aws_cloudwatch_dashboard" "foobar" { +resource "aws_cloudwatch_dashboard" "test" { dashboard_name = "terraform-test-dashboard-%d" dashboard_body = < Date: Wed, 23 Oct 2019 15:17:28 +0200 Subject: [PATCH 20/55] Import test refactor for cloudy resources --- 
...esource_aws_cloud9_environment_ec2_test.go | 38 ++-- aws/resource_aws_cloudformation_stack_test.go | 167 +++++++++++------- ..._cloudfront_origin_access_identity_test.go | 61 +++---- 3 files changed, 139 insertions(+), 127 deletions(-) diff --git a/aws/resource_aws_cloud9_environment_ec2_test.go b/aws/resource_aws_cloud9_environment_ec2_test.go index 0399b78edba..3f24b571443 100644 --- a/aws/resource_aws_cloud9_environment_ec2_test.go +++ b/aws/resource_aws_cloud9_environment_ec2_test.go @@ -18,7 +18,6 @@ func TestAccAWSCloud9EnvironmentEc2_basic(t *testing.T) { rString := acctest.RandString(8) envName := fmt.Sprintf("tf_acc_env_basic_%s", rString) uEnvName := fmt.Sprintf("tf_acc_env_basic_updated_%s", rString) - resourceName := "aws_cloud9_environment_ec2.test" resource.ParallelTest(t, resource.TestCase{ @@ -36,6 +35,12 @@ func TestAccAWSCloud9EnvironmentEc2_basic(t *testing.T) { resource.TestMatchResourceAttr(resourceName, "owner_arn", regexp.MustCompile(`^arn:`)), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"instance_type"}, + }, { Config: testAccAWSCloud9EnvironmentEc2Config(uEnvName), Check: resource.ComposeTestCheckFunc( @@ -59,7 +64,6 @@ func TestAccAWSCloud9EnvironmentEc2_allFields(t *testing.T) { description := fmt.Sprintf("Tf Acc Test %s", rString) uDescription := fmt.Sprintf("Tf Acc Test Updated %s", rString) userName := fmt.Sprintf("tf_acc_cloud9_env_%s", rString) - resourceName := "aws_cloud9_environment_ec2.test" resource.ParallelTest(t, resource.TestCase{ @@ -78,6 +82,12 @@ func TestAccAWSCloud9EnvironmentEc2_allFields(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "type", "ec2"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"instance_type", "automatic_stop_time_minutes", "subnet_id"}, + }, { Config: testAccAWSCloud9EnvironmentEc2AllFieldsConfig(uEnvName, uDescription, userName), Check: resource.ComposeTestCheckFunc( @@ -93,30 +103,6 @@ func TestAccAWSCloud9EnvironmentEc2_allFields(t *testing.T) { }) } -func TestAccAWSCloud9EnvironmentEc2_importBasic(t *testing.T) { - rString := acctest.RandString(8) - name := fmt.Sprintf("tf_acc_api_doc_part_import_%s", rString) - - resourceName := "aws_cloud9_environment_ec2.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCloud9(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCloud9EnvironmentEc2Destroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSCloud9EnvironmentEc2Config(name), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"instance_type"}, - }, - }, - }) -} - func testAccCheckAWSCloud9EnvironmentEc2Exists(n string, res *cloud9.Environment) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] diff --git a/aws/resource_aws_cloudformation_stack_test.go b/aws/resource_aws_cloudformation_stack_test.go index bc0d3c52274..9431e13c8cb 100644 --- a/aws/resource_aws_cloudformation_stack_test.go +++ b/aws/resource_aws_cloudformation_stack_test.go @@ -11,10 +11,10 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/terraform" ) -func TestAccAWSCloudFormationStack_importBasic(t *testing.T) { +func TestAccAWSCloudFormationStack_basic(t *testing.T) { + var stack cloudformation.Stack stackName := fmt.Sprintf("tf-acc-test-basic-%s", acctest.RandString(10)) 
- - resourceName := "aws_cloudformation_stack.network" + resourceName := "aws_cloudformation_stack.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -23,6 +23,9 @@ func TestAccAWSCloudFormationStack_importBasic(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccAWSCloudFormationStackConfig(stackName), + Check: resource.ComposeTestCheckFunc( + testAccCheckCloudFormationStackExists(resourceName, &stack), + ), }, { ResourceName: resourceName, @@ -33,28 +36,10 @@ func TestAccAWSCloudFormationStack_importBasic(t *testing.T) { }) } -func TestAccAWSCloudFormationStack_basic(t *testing.T) { - var stack cloudformation.Stack - stackName := fmt.Sprintf("tf-acc-test-basic-%s", acctest.RandString(10)) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCloudFormationDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSCloudFormationStackConfig(stackName), - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudFormationStackExists("aws_cloudformation_stack.network", &stack), - ), - }, - }, - }) -} - func TestAccAWSCloudFormationStack_disappears(t *testing.T) { var stack cloudformation.Stack stackName := fmt.Sprintf("tf-acc-test-basic-%s", acctest.RandString(10)) + resourceName := "aws_cloudformation_stack.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -64,7 +49,7 @@ func TestAccAWSCloudFormationStack_disappears(t *testing.T) { { Config: testAccAWSCloudFormationStackConfig(stackName), Check: resource.ComposeTestCheckFunc( - testAccCheckCloudFormationStackExists("aws_cloudformation_stack.network", &stack), + testAccCheckCloudFormationStackExists(resourceName, &stack), testAccCheckCloudFormationStackDisappears(&stack), ), ExpectNonEmptyPlan: true, @@ -76,6 +61,7 @@ func TestAccAWSCloudFormationStack_disappears(t *testing.T) { func TestAccAWSCloudFormationStack_yaml(t *testing.T) { var stack cloudformation.Stack stackName := fmt.Sprintf("tf-acc-test-yaml-%s", acctest.RandString(10)) + resourceName := "aws_cloudformation_stack.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -85,9 +71,14 @@ func TestAccAWSCloudFormationStack_yaml(t *testing.T) { { Config: testAccAWSCloudFormationStackConfig_yaml(stackName), Check: resource.ComposeTestCheckFunc( - testAccCheckCloudFormationStackExists("aws_cloudformation_stack.yaml", &stack), + testAccCheckCloudFormationStackExists(resourceName, &stack), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -95,6 +86,7 @@ func TestAccAWSCloudFormationStack_yaml(t *testing.T) { func TestAccAWSCloudFormationStack_defaultParams(t *testing.T) { var stack cloudformation.Stack stackName := fmt.Sprintf("tf-acc-test-default-params-%s", acctest.RandString(10)) + resourceName := "aws_cloudformation_stack.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -104,9 +96,15 @@ func TestAccAWSCloudFormationStack_defaultParams(t *testing.T) { { Config: testAccAWSCloudFormationStackConfig_defaultParams(stackName), Check: resource.ComposeTestCheckFunc( - testAccCheckCloudFormationStackExists("aws_cloudformation_stack.asg-demo", &stack), + testAccCheckCloudFormationStackExists(resourceName, &stack), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"parameters"}, + }, }, }) } 
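The patches in this part of the series repeatedly fold the old standalone `_importBasic` tests into the main acceptance tests by appending an import verification step after the create step. As a rough, hypothetical sketch of that recurring pattern (the resource name and the `testAccAWSExampleResource*` helpers below are placeholders for illustration, not taken from any one patch), the refactored test shape looks roughly like this:

package aws

import (
	"fmt"
	"testing"

	"github.com/hashicorp/terraform-plugin-sdk/helper/acctest"
	"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
)

// Hypothetical example resource; the real tests in these patches follow the
// same shape: create the resource, verify an import round-trip, then update.
func TestAccAWSExampleResource_basic(t *testing.T) {
	rName := fmt.Sprintf("tf-acc-test-%s", acctest.RandString(10))
	resourceName := "aws_example_resource.test"

	resource.ParallelTest(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckAWSExampleResourceDestroy, // placeholder destroy check
		Steps: []resource.TestStep{
			{
				// Step 1: create the resource and confirm it exists.
				Config: testAccAWSExampleResourceConfig(rName), // placeholder config helper
				Check: resource.ComposeTestCheckFunc(
					testAccCheckAWSExampleResourceExists(resourceName), // placeholder exists check
				),
			},
			{
				// Step 2: import the resource by its ID and verify the imported
				// state matches the applied state.
				ResourceName:            resourceName,
				ImportState:             true,
				ImportStateVerify:       true,
				ImportStateVerifyIgnore: []string{"example_write_only_attribute"},
			},
			{
				// Step 3: apply an updated configuration against the same resource.
				Config: testAccAWSExampleResourceConfigUpdated(rName), // placeholder config helper
			},
		},
	})
}

Attributes listed in ImportStateVerifyIgnore (for example "parameters" or "template_url" in the CloudFormation tests above, or "instance_type" for Cloud9) are ones the service API does not return on read, so they cannot be verified through an import round-trip.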
@@ -114,8 +112,9 @@ func TestAccAWSCloudFormationStack_defaultParams(t *testing.T) { func TestAccAWSCloudFormationStack_allAttributes(t *testing.T) { var stack cloudformation.Stack stackName := fmt.Sprintf("tf-acc-test-all-attributes-%s", acctest.RandString(10)) - + resourceName := "aws_cloudformation_stack.test" expectedPolicyBody := "{\"Statement\":[{\"Action\":\"Update:*\",\"Effect\":\"Deny\",\"Principal\":\"*\",\"Resource\":\"LogicalResourceId/StaticVPC\"},{\"Action\":\"Update:*\",\"Effect\":\"Allow\",\"Principal\":\"*\",\"Resource\":\"*\"}]}" + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -124,37 +123,43 @@ func TestAccAWSCloudFormationStack_allAttributes(t *testing.T) { { Config: testAccAWSCloudFormationStackConfig_allAttributesWithBodies(stackName), Check: resource.ComposeTestCheckFunc( - testAccCheckCloudFormationStackExists("aws_cloudformation_stack.full", &stack), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "name", stackName), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "capabilities.#", "1"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "capabilities.1328347040", "CAPABILITY_IAM"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "disable_rollback", "false"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "notification_arns.#", "1"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "parameters.%", "1"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "parameters.VpcCIDR", "10.0.0.0/16"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "policy_body", expectedPolicyBody), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "tags.%", "2"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "tags.First", "Mickey"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "tags.Second", "Mouse"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "timeout_in_minutes", "10"), + testAccCheckCloudFormationStackExists(resourceName, &stack), + resource.TestCheckResourceAttr(resourceName, "name", stackName), + resource.TestCheckResourceAttr(resourceName, "capabilities.#", "1"), + resource.TestCheckResourceAttr(resourceName, "capabilities.1328347040", "CAPABILITY_IAM"), + resource.TestCheckResourceAttr(resourceName, "disable_rollback", "false"), + resource.TestCheckResourceAttr(resourceName, "notification_arns.#", "1"), + resource.TestCheckResourceAttr(resourceName, "parameters.%", "1"), + resource.TestCheckResourceAttr(resourceName, "parameters.VpcCIDR", "10.0.0.0/16"), + resource.TestCheckResourceAttr(resourceName, "policy_body", expectedPolicyBody), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.First", "Mickey"), + resource.TestCheckResourceAttr(resourceName, "tags.Second", "Mouse"), + resource.TestCheckResourceAttr(resourceName, "timeout_in_minutes", "10"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_failure", "parameters", "policy_body"}, + }, { Config: testAccAWSCloudFormationStackConfig_allAttributesWithBodies_modified(stackName), Check: resource.ComposeTestCheckFunc( - testAccCheckCloudFormationStackExists("aws_cloudformation_stack.full", &stack), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "name", stackName), - 
resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "capabilities.#", "1"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "capabilities.1328347040", "CAPABILITY_IAM"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "disable_rollback", "false"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "notification_arns.#", "1"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "parameters.%", "1"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "parameters.VpcCIDR", "10.0.0.0/16"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "policy_body", expectedPolicyBody), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "tags.%", "2"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "tags.First", "Mickey"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "tags.Second", "Mouse"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "timeout_in_minutes", "10"), + testAccCheckCloudFormationStackExists(resourceName, &stack), + resource.TestCheckResourceAttr(resourceName, "name", stackName), + resource.TestCheckResourceAttr(resourceName, "capabilities.#", "1"), + resource.TestCheckResourceAttr(resourceName, "capabilities.1328347040", "CAPABILITY_IAM"), + resource.TestCheckResourceAttr(resourceName, "disable_rollback", "false"), + resource.TestCheckResourceAttr(resourceName, "notification_arns.#", "1"), + resource.TestCheckResourceAttr(resourceName, "parameters.%", "1"), + resource.TestCheckResourceAttr(resourceName, "parameters.VpcCIDR", "10.0.0.0/16"), + resource.TestCheckResourceAttr(resourceName, "policy_body", expectedPolicyBody), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.First", "Mickey"), + resource.TestCheckResourceAttr(resourceName, "tags.Second", "Mouse"), + resource.TestCheckResourceAttr(resourceName, "timeout_in_minutes", "10"), ), }, }, @@ -165,6 +170,7 @@ func TestAccAWSCloudFormationStack_allAttributes(t *testing.T) { func TestAccAWSCloudFormationStack_withParams(t *testing.T) { var stack cloudformation.Stack stackName := fmt.Sprintf("tf-acc-test-with-params-%s", acctest.RandString(10)) + resourceName := "aws_cloudformation_stack.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -174,13 +180,19 @@ func TestAccAWSCloudFormationStack_withParams(t *testing.T) { { Config: testAccAWSCloudFormationStackConfig_withParams(stackName), Check: resource.ComposeTestCheckFunc( - testAccCheckCloudFormationStackExists("aws_cloudformation_stack.with_params", &stack), + testAccCheckCloudFormationStackExists(resourceName, &stack), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_failure", "parameters"}, + }, { Config: testAccAWSCloudFormationStackConfig_withParams_modified(stackName), Check: resource.ComposeTestCheckFunc( - testAccCheckCloudFormationStackExists("aws_cloudformation_stack.with_params", &stack), + testAccCheckCloudFormationStackExists(resourceName, &stack), ), }, }, @@ -191,6 +203,7 @@ func TestAccAWSCloudFormationStack_withParams(t *testing.T) { func TestAccAWSCloudFormationStack_withUrl_withParams(t *testing.T) { var stack cloudformation.Stack rName := fmt.Sprintf("tf-acc-test-with-url-and-params-%s", acctest.RandString(10)) + resourceName := "aws_cloudformation_stack.test" resource.ParallelTest(t, resource.TestCase{ 
PreCheck: func() { testAccPreCheck(t) }, @@ -200,13 +213,19 @@ func TestAccAWSCloudFormationStack_withUrl_withParams(t *testing.T) { { Config: testAccAWSCloudFormationStackConfig_templateUrl_withParams(rName, "tf-cf-stack.json", "11.0.0.0/16"), Check: resource.ComposeTestCheckFunc( - testAccCheckCloudFormationStackExists("aws_cloudformation_stack.with-url-and-params", &stack), + testAccCheckCloudFormationStackExists(resourceName, &stack), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_failure", "parameters", "template_url"}, + }, { Config: testAccAWSCloudFormationStackConfig_templateUrl_withParams(rName, "tf-cf-stack.json", "13.0.0.0/16"), Check: resource.ComposeTestCheckFunc( - testAccCheckCloudFormationStackExists("aws_cloudformation_stack.with-url-and-params", &stack), + testAccCheckCloudFormationStackExists(resourceName, &stack), ), }, }, @@ -216,6 +235,7 @@ func TestAccAWSCloudFormationStack_withUrl_withParams(t *testing.T) { func TestAccAWSCloudFormationStack_withUrl_withParams_withYaml(t *testing.T) { var stack cloudformation.Stack rName := fmt.Sprintf("tf-acc-test-with-params-and-yaml-%s", acctest.RandString(10)) + resourceName := "aws_cloudformation_stack.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -223,11 +243,17 @@ func TestAccAWSCloudFormationStack_withUrl_withParams_withYaml(t *testing.T) { CheckDestroy: testAccCheckAWSCloudFormationDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSCloudFormationStackConfig_templateUrl_withParams_withYaml(rName, "tf-cf-stack.yaml", "13.0.0.0/16"), + Config: testAccAWSCloudFormationStackConfig_templateUrl_withParams_withYaml(rName, "tf-cf-stack.test", "13.0.0.0/16"), Check: resource.ComposeTestCheckFunc( - testAccCheckCloudFormationStackExists("aws_cloudformation_stack.with-url-and-params-and-yaml", &stack), + testAccCheckCloudFormationStackExists(resourceName, &stack), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_failure", "parameters", "template_url"}, + }, }, }) } @@ -236,6 +262,7 @@ func TestAccAWSCloudFormationStack_withUrl_withParams_withYaml(t *testing.T) { func TestAccAWSCloudFormationStack_withUrl_withParams_noUpdate(t *testing.T) { var stack cloudformation.Stack rName := fmt.Sprintf("tf-acc-test-with-params-no-update-%s", acctest.RandString(10)) + resourceName := "aws_cloudformation_stack.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -245,13 +272,19 @@ func TestAccAWSCloudFormationStack_withUrl_withParams_noUpdate(t *testing.T) { { Config: testAccAWSCloudFormationStackConfig_templateUrl_withParams(rName, "tf-cf-stack-1.json", "11.0.0.0/16"), Check: resource.ComposeTestCheckFunc( - testAccCheckCloudFormationStackExists("aws_cloudformation_stack.with-url-and-params", &stack), + testAccCheckCloudFormationStackExists(resourceName, &stack), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_failure", "parameters", "template_url"}, + }, { Config: testAccAWSCloudFormationStackConfig_templateUrl_withParams(rName, "tf-cf-stack-2.json", "11.0.0.0/16"), Check: resource.ComposeTestCheckFunc( - testAccCheckCloudFormationStackExists("aws_cloudformation_stack.with-url-and-params", &stack), + testAccCheckCloudFormationStackExists(resourceName, &stack), ), }, }, @@ -336,7 +369,7 @@ func 
testAccCheckCloudFormationStackDisappears(stack *cloudformation.Stack) reso func testAccAWSCloudFormationStackConfig(stackName string) string { return fmt.Sprintf(` -resource "aws_cloudformation_stack" "network" { +resource "aws_cloudformation_stack" "test" { name = "%[1]s" template_body = < Date: Wed, 23 Oct 2019 15:37:12 +0200 Subject: [PATCH 21/55] Import test refactor for athena named queries --- aws/resource_aws_athena_named_query_test.go | 53 +++++++++------------ 1 file changed, 23 insertions(+), 30 deletions(-) diff --git a/aws/resource_aws_athena_named_query_test.go b/aws/resource_aws_athena_named_query_test.go index 49708022ca1..94d0ee2fd36 100644 --- a/aws/resource_aws_athena_named_query_test.go +++ b/aws/resource_aws_athena_named_query_test.go @@ -12,6 +12,8 @@ import ( ) func TestAccAWSAthenaNamedQuery_basic(t *testing.T) { + resourceName := "aws_athena_named_query.test" + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -20,14 +22,21 @@ func TestAccAWSAthenaNamedQuery_basic(t *testing.T) { { Config: testAccAthenaNamedQueryConfig(acctest.RandInt(), acctest.RandString(5)), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAthenaNamedQueryExists("aws_athena_named_query.foo"), + testAccCheckAWSAthenaNamedQueryExists(resourceName), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } func TestAccAWSAthenaNamedQuery_withWorkGroup(t *testing.T) { + resourceName := "aws_athena_named_query.test" + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -36,25 +45,9 @@ func TestAccAWSAthenaNamedQuery_withWorkGroup(t *testing.T) { { Config: testAccAthenaNamedWorkGroupQueryConfig(acctest.RandInt(), acctest.RandString(5)), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAthenaNamedQueryExists("aws_athena_named_query.bar"), + testAccCheckAWSAthenaNamedQueryExists(resourceName), ), }, - }, - }) -} - -func TestAccAWSAthenaNamedQuery_import(t *testing.T) { - resourceName := "aws_athena_named_query.foo" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAthenaNamedQueryDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAthenaNamedQueryConfig(acctest.RandInt(), acctest.RandString(5)), - }, - { ResourceName: resourceName, ImportState: true, @@ -109,20 +102,20 @@ func testAccCheckAWSAthenaNamedQueryExists(name string) resource.TestCheckFunc { func testAccAthenaNamedQueryConfig(rInt int, rName string) string { return fmt.Sprintf(` -resource "aws_s3_bucket" "hoge" { +resource "aws_s3_bucket" "test" { bucket = "tf-athena-db-%s-%d" force_destroy = true } -resource "aws_athena_database" "hoge" { +resource "aws_athena_database" "test" { name = "%s" - bucket = "${aws_s3_bucket.hoge.bucket}" + bucket = "${aws_s3_bucket.test.bucket}" } -resource "aws_athena_named_query" "foo" { +resource "aws_athena_named_query" "test" { name = "tf-athena-named-query-%s" - database = "${aws_athena_database.hoge.name}" - query = "SELECT * FROM ${aws_athena_database.hoge.name} limit 10;" + database = "${aws_athena_database.test.name}" + query = "SELECT * FROM ${aws_athena_database.test.name} limit 10;" description = "tf test" } `, rName, rInt, rName, rName) @@ -130,7 +123,7 @@ resource "aws_athena_named_query" "foo" { func testAccAthenaNamedWorkGroupQueryConfig(rInt int, rName string) string { return fmt.Sprintf(` -resource 
"aws_s3_bucket" "hoge" { +resource "aws_s3_bucket" "test" { bucket = "tf-athena-db-%s-%d" force_destroy = true } @@ -139,16 +132,16 @@ resource "aws_athena_workgroup" "test" { name = "tf-athena-workgroup-%s-%d" } -resource "aws_athena_database" "hoge" { +resource "aws_athena_database" "test" { name = "%s" - bucket = "${aws_s3_bucket.hoge.bucket}" + bucket = "${aws_s3_bucket.test.bucket}" } -resource "aws_athena_named_query" "bar" { +resource "aws_athena_named_query" "test" { name = "tf-athena-named-query-%s" workgroup = "${aws_athena_workgroup.test.id}" - database = "${aws_athena_database.hoge.name}" - query = "SELECT * FROM ${aws_athena_database.hoge.name} limit 10;" + database = "${aws_athena_database.test.name}" + query = "SELECT * FROM ${aws_athena_database.test.name} limit 10;" description = "tf test" } `, rName, rInt, rName, rInt, rName, rName) From 18415ee30eeef4917305cbf5d4f2031b33af25be Mon Sep 17 00:00:00 2001 From: Ryn Daniels Date: Wed, 23 Oct 2019 16:00:19 +0200 Subject: [PATCH 22/55] import test lintignores for ACM cert tests --- aws/resource_aws_acm_certificate_test.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_acm_certificate_test.go b/aws/resource_aws_acm_certificate_test.go index afbd13349bc..694d02c30ec 100644 --- a/aws/resource_aws_acm_certificate_test.go +++ b/aws/resource_aws_acm_certificate_test.go @@ -525,6 +525,7 @@ func TestAccAWSAcmCertificate_tags(t *testing.T) { }) } +//lintignore:AT002 func TestAccAWSAcmCertificate_imported_DomainName(t *testing.T) { resourceName := "aws_acm_certificate.test" @@ -558,8 +559,8 @@ func TestAccAWSAcmCertificate_imported_DomainName(t *testing.T) { }) } -// Reference: https://github.com/terraform-providers/terraform-provider-aws/issues/7103 -func TestAccAWSAcmCertificate_imported_IpAddress(t *testing.T) { +//lintignore:AT002 +func TestAccAWSAcmCertificate_imported_IpAddress(t *testing.T) { // Reference: https://github.com/terraform-providers/terraform-provider-aws/issues/7103 resourceName := "aws_acm_certificate.test" resource.ParallelTest(t, resource.TestCase{ From 25b3e8fa215f608a2f18996870e25228ad9af29d Mon Sep 17 00:00:00 2001 From: Ryn Daniels Date: Thu, 24 Oct 2019 10:36:41 +0200 Subject: [PATCH 23/55] import test lintignores for API gateway resources --- aws/resource_aws_api_gateway_account_test.go | 39 +-- ...aws_api_gateway_client_certificate_test.go | 39 +-- ...aws_api_gateway_documentation_part_test.go | 48 ++-- ..._api_gateway_documentation_version_test.go | 60 +---- ...esource_aws_api_gateway_usage_plan_test.go | 234 ++++++++++-------- aws/resource_aws_api_gateway_vpc_link_test.go | 44 ++-- 6 files changed, 191 insertions(+), 273 deletions(-) diff --git a/aws/resource_aws_api_gateway_account_test.go b/aws/resource_aws_api_gateway_account_test.go index 8fd6d1edbae..6f10529d25b 100644 --- a/aws/resource_aws_api_gateway_account_test.go +++ b/aws/resource_aws_api_gateway_account_test.go @@ -11,34 +11,13 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/terraform" ) -func TestAccAWSAPIGatewayAccount_importBasic(t *testing.T) { - resourceName := "aws_api_gateway_account.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAPIGatewayAccountDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSAPIGatewayAccountConfig_empty, - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - func 
TestAccAWSAPIGatewayAccount_basic(t *testing.T) { var conf apigateway.Account rInt := acctest.RandInt() firstName := fmt.Sprintf("tf_acc_api_gateway_cloudwatch_%d", rInt) secondName := fmt.Sprintf("tf_acc_api_gateway_cloudwatch_modified_%d", rInt) - + resourceName := "aws_api_gateway_account.test" expectedRoleArn_first := regexp.MustCompile(":role/" + firstName + "$") expectedRoleArn_second := regexp.MustCompile(":role/" + secondName + "$") @@ -50,23 +29,29 @@ func TestAccAWSAPIGatewayAccount_basic(t *testing.T) { { Config: testAccAWSAPIGatewayAccountConfig_updated(firstName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayAccountExists("aws_api_gateway_account.test", &conf), + testAccCheckAWSAPIGatewayAccountExists(resourceName, &conf), testAccCheckAWSAPIGatewayAccountCloudwatchRoleArn(&conf, expectedRoleArn_first), - resource.TestMatchResourceAttr("aws_api_gateway_account.test", "cloudwatch_role_arn", expectedRoleArn_first), + resource.TestMatchResourceAttr(resourceName, "cloudwatch_role_arn", expectedRoleArn_first), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"cloudwatch_role_arn"}, + }, { Config: testAccAWSAPIGatewayAccountConfig_updated2(secondName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayAccountExists("aws_api_gateway_account.test", &conf), + testAccCheckAWSAPIGatewayAccountExists(resourceName, &conf), testAccCheckAWSAPIGatewayAccountCloudwatchRoleArn(&conf, expectedRoleArn_second), - resource.TestMatchResourceAttr("aws_api_gateway_account.test", "cloudwatch_role_arn", expectedRoleArn_second), + resource.TestMatchResourceAttr(resourceName, "cloudwatch_role_arn", expectedRoleArn_second), ), }, { Config: testAccAWSAPIGatewayAccountConfig_empty, Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayAccountExists("aws_api_gateway_account.test", &conf), + testAccCheckAWSAPIGatewayAccountExists(resourceName, &conf), testAccCheckAWSAPIGatewayAccountCloudwatchRoleArn(&conf, expectedRoleArn_second), ), }, diff --git a/aws/resource_aws_api_gateway_client_certificate_test.go b/aws/resource_aws_api_gateway_client_certificate_test.go index 111dd4ccfaa..418ff1a2e62 100644 --- a/aws/resource_aws_api_gateway_client_certificate_test.go +++ b/aws/resource_aws_api_gateway_client_certificate_test.go @@ -13,6 +13,7 @@ import ( func TestAccAWSAPIGatewayClientCertificate_basic(t *testing.T) { var conf apigateway.ClientCertificate + resourceName := "aws_api_gateway_client_certificate.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -22,38 +23,22 @@ func TestAccAWSAPIGatewayClientCertificate_basic(t *testing.T) { { Config: testAccAWSAPIGatewayClientCertificateConfig_basic, Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayClientCertificateExists("aws_api_gateway_client_certificate.cow", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_client_certificate.cow", "description", "Hello from TF acceptance test"), + testAccCheckAWSAPIGatewayClientCertificateExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "description", "Hello from TF acceptance test"), ), }, - { - Config: testAccAWSAPIGatewayClientCertificateConfig_basic_updated, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayClientCertificateExists("aws_api_gateway_client_certificate.cow", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_client_certificate.cow", "description", "Hello from TF acceptance test 
- updated"), - ), - }, - }, - }) -} - -func TestAccAWSAPIGatewayClientCertificate_importBasic(t *testing.T) { - resourceName := "aws_api_gateway_client_certificate.cow" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAPIGatewayClientCertificateDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSAPIGatewayClientCertificateConfig_basic, - }, - { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, }, + { + Config: testAccAWSAPIGatewayClientCertificateConfig_basic_updated, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAPIGatewayClientCertificateExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "description", "Hello from TF acceptance test - updated"), + ), + }, }, }) } @@ -116,13 +101,13 @@ func testAccCheckAWSAPIGatewayClientCertificateDestroy(s *terraform.State) error } const testAccAWSAPIGatewayClientCertificateConfig_basic = ` -resource "aws_api_gateway_client_certificate" "cow" { +resource "aws_api_gateway_client_certificate" "test" { description = "Hello from TF acceptance test" } ` const testAccAWSAPIGatewayClientCertificateConfig_basic_updated = ` -resource "aws_api_gateway_client_certificate" "cow" { +resource "aws_api_gateway_client_certificate" "test" { description = "Hello from TF acceptance test - updated" } ` diff --git a/aws/resource_aws_api_gateway_documentation_part_test.go b/aws/resource_aws_api_gateway_documentation_part_test.go index b0df32e5cd1..5b86b42ed86 100644 --- a/aws/resource_aws_api_gateway_documentation_part_test.go +++ b/aws/resource_aws_api_gateway_documentation_part_test.go @@ -37,6 +37,11 @@ func TestAccAWSAPIGatewayDocumentationPart_basic(t *testing.T) { resource.TestCheckResourceAttrSet(resourceName, "rest_api_id"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSAPIGatewayDocumentationPartConfig(apiName, strconv.Quote(uProperties)), Check: resource.ComposeTestCheckFunc( @@ -78,6 +83,11 @@ func TestAccAWSAPIGatewayDocumentationPart_method(t *testing.T) { resource.TestCheckResourceAttrSet(resourceName, "rest_api_id"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSAPIGatewayDocumentationPartMethodConfig(apiName, strconv.Quote(uProperties)), Check: resource.ComposeTestCheckFunc( @@ -123,6 +133,11 @@ func TestAccAWSAPIGatewayDocumentationPart_responseHeader(t *testing.T) { resource.TestCheckResourceAttrSet(resourceName, "rest_api_id"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSAPIGatewayDocumentationPartResponseHeaderConfig(apiName, strconv.Quote(uProperties)), Check: resource.ComposeTestCheckFunc( @@ -141,39 +156,6 @@ func TestAccAWSAPIGatewayDocumentationPart_responseHeader(t *testing.T) { }) } -func TestAccAWSAPIGatewayDocumentationPart_importBasic(t *testing.T) { - var conf apigateway.DocumentationPart - - rString := acctest.RandString(8) - apiName := fmt.Sprintf("tf_acc_api_doc_part_import_%s", rString) - properties := `{"description":"Terraform Acceptance Test"}` - - resourceName := "aws_api_gateway_documentation_part.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAPIGatewayDocumentationPartDestroy, - Steps: []resource.TestStep{ - { - Config: 
testAccAWSAPIGatewayDocumentationPartConfig(apiName, strconv.Quote(properties)), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayDocumentationPartExists(resourceName, &conf), - resource.TestCheckResourceAttr(resourceName, "location.#", "1"), - resource.TestCheckResourceAttr(resourceName, "location.0.type", "API"), - resource.TestCheckResourceAttr(resourceName, "properties", properties), - resource.TestCheckResourceAttrSet(resourceName, "rest_api_id"), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - func testAccCheckAWSAPIGatewayDocumentationPartExists(n string, res *apigateway.DocumentationPart) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] diff --git a/aws/resource_aws_api_gateway_documentation_version_test.go b/aws/resource_aws_api_gateway_documentation_version_test.go index c5f1c44e84f..b28788107e8 100644 --- a/aws/resource_aws_api_gateway_documentation_version_test.go +++ b/aws/resource_aws_api_gateway_documentation_version_test.go @@ -33,6 +33,11 @@ func TestAccAWSAPIGatewayDocumentationVersion_basic(t *testing.T) { resource.TestCheckResourceAttrSet(resourceName, "rest_api_id"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -63,6 +68,11 @@ func TestAccAWSAPIGatewayDocumentationVersion_allFields(t *testing.T) { resource.TestCheckResourceAttrSet(resourceName, "rest_api_id"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSAPIGatewayDocumentationVersionAllFieldsConfig(version, apiName, stageName, uDescription), Check: resource.ComposeTestCheckFunc( @@ -76,56 +86,6 @@ func TestAccAWSAPIGatewayDocumentationVersion_allFields(t *testing.T) { }) } -func TestAccAWSAPIGatewayDocumentationVersion_importBasic(t *testing.T) { - rString := acctest.RandString(8) - version := fmt.Sprintf("tf_acc_version_import_%s", rString) - apiName := fmt.Sprintf("tf_acc_api_doc_version_import_%s", rString) - - resourceName := "aws_api_gateway_documentation_version.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAPIGatewayDocumentationVersionDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSAPIGatewayDocumentationVersionBasicConfig(version, apiName), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccAWSAPIGatewayDocumentationVersion_importAllFields(t *testing.T) { - rString := acctest.RandString(8) - version := fmt.Sprintf("tf_acc_version_import_af_%s", rString) - apiName := fmt.Sprintf("tf_acc_api_doc_version_import_af_%s", rString) - stageName := fmt.Sprintf("tf_acc_stage_%s", rString) - description := fmt.Sprintf("Tf Acc Test description %s", rString) - - resourceName := "aws_api_gateway_documentation_version.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAPIGatewayDocumentationVersionDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSAPIGatewayDocumentationVersionAllFieldsConfig(version, apiName, stageName, description), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - func testAccCheckAWSAPIGatewayDocumentationVersionExists(n string, res *apigateway.DocumentationVersion) 
resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] diff --git a/aws/resource_aws_api_gateway_usage_plan_test.go b/aws/resource_aws_api_gateway_usage_plan_test.go index 1183edfaf14..47036795eb7 100644 --- a/aws/resource_aws_api_gateway_usage_plan_test.go +++ b/aws/resource_aws_api_gateway_usage_plan_test.go @@ -12,32 +12,11 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/terraform" ) -func TestAccAWSAPIGatewayUsagePlan_importBasic(t *testing.T) { - resourceName := "aws_api_gateway_usage_plan.main" - rName := acctest.RandString(10) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAPIGatewayUsagePlanDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSApiGatewayUsagePlanBasicConfig(rName), - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - func TestAccAWSAPIGatewayUsagePlan_basic(t *testing.T) { var conf apigateway.UsagePlan name := acctest.RandString(10) updatedName := acctest.RandString(10) + resourceName := "aws_api_gateway_usage_plan.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -47,24 +26,29 @@ func TestAccAWSAPIGatewayUsagePlan_basic(t *testing.T) { { Config: testAccAWSApiGatewayUsagePlanBasicConfig(name), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "description", ""), + testAccCheckAWSAPIGatewayUsagePlanExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "name", name), + resource.TestCheckResourceAttr(resourceName, "description", ""), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSApiGatewayUsagePlanBasicUpdatedConfig(updatedName), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", updatedName), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "description", ""), + resource.TestCheckResourceAttr(resourceName, "name", updatedName), + resource.TestCheckResourceAttr(resourceName, "description", ""), ), }, { Config: testAccAWSApiGatewayUsagePlanBasicConfig(name), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "description", ""), + testAccCheckAWSAPIGatewayUsagePlanExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "name", name), + resource.TestCheckResourceAttr(resourceName, "description", ""), ), }, }, @@ -74,6 +58,7 @@ func TestAccAWSAPIGatewayUsagePlan_basic(t *testing.T) { func TestAccAWSAPIGatewayUsagePlan_description(t *testing.T) { var conf apigateway.UsagePlan name := acctest.RandString(10) + resourceName := "aws_api_gateway_usage_plan.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -83,35 +68,40 @@ func TestAccAWSAPIGatewayUsagePlan_description(t *testing.T) { { Config: testAccAWSApiGatewayUsagePlanBasicConfig(name), Check: resource.ComposeTestCheckFunc( - 
testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "description", ""), + testAccCheckAWSAPIGatewayUsagePlanExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "description", ""), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSApiGatewayUsagePlanDescriptionConfig(name), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "description", "This is a description"), + testAccCheckAWSAPIGatewayUsagePlanExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "description", "This is a description"), ), }, { Config: testAccAWSApiGatewayUsagePlanDescriptionUpdatedConfig(name), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "description", "This is a new description"), + resource.TestCheckResourceAttr(resourceName, "description", "This is a new description"), ), }, { Config: testAccAWSApiGatewayUsagePlanDescriptionConfig(name), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "description", "This is a description"), + testAccCheckAWSAPIGatewayUsagePlanExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "description", "This is a description"), ), }, { Config: testAccAWSApiGatewayUsagePlanBasicConfig(name), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "description", ""), + testAccCheckAWSAPIGatewayUsagePlanExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "description", ""), ), }, }, @@ -121,6 +111,7 @@ func TestAccAWSAPIGatewayUsagePlan_description(t *testing.T) { func TestAccAWSAPIGatewayUsagePlan_productCode(t *testing.T) { var conf apigateway.UsagePlan name := acctest.RandString(10) + resourceName := "aws_api_gateway_usage_plan.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -130,35 +121,40 @@ func TestAccAWSAPIGatewayUsagePlan_productCode(t *testing.T) { { Config: testAccAWSApiGatewayUsagePlanBasicConfig(name), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "product_code", ""), + testAccCheckAWSAPIGatewayUsagePlanExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "product_code", ""), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSApiGatewayUsagePlanProductCodeConfig(name), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "product_code", "MYCODE"), + testAccCheckAWSAPIGatewayUsagePlanExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "product_code", "MYCODE"), ), }, { Config: testAccAWSApiGatewayUsagePlanProductCodeUpdatedConfig(name), Check: resource.ComposeTestCheckFunc( - 
resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "product_code", "MYCODE2"), + resource.TestCheckResourceAttr(resourceName, "product_code", "MYCODE2"), ), }, { Config: testAccAWSApiGatewayUsagePlanProductCodeConfig(name), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "product_code", "MYCODE"), + testAccCheckAWSAPIGatewayUsagePlanExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "product_code", "MYCODE"), ), }, { Config: testAccAWSApiGatewayUsagePlanBasicConfig(name), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "product_code", ""), + testAccCheckAWSAPIGatewayUsagePlanExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "product_code", ""), ), }, }, @@ -168,6 +164,7 @@ func TestAccAWSAPIGatewayUsagePlan_productCode(t *testing.T) { func TestAccAWSAPIGatewayUsagePlan_throttling(t *testing.T) { var conf apigateway.UsagePlan name := acctest.RandString(10) + resourceName := "aws_api_gateway_usage_plan.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -177,35 +174,40 @@ func TestAccAWSAPIGatewayUsagePlan_throttling(t *testing.T) { { Config: testAccAWSApiGatewayUsagePlanBasicConfig(name), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name), - resource.TestCheckNoResourceAttr("aws_api_gateway_usage_plan.main", "throttle_settings"), + testAccCheckAWSAPIGatewayUsagePlanExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "name", name), + resource.TestCheckNoResourceAttr(resourceName, "throttle_settings"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSApiGatewayUsagePlanThrottlingConfig(name), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "throttle_settings.4173790118.burst_limit", "2"), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "throttle_settings.4173790118.rate_limit", "5"), + testAccCheckAWSAPIGatewayUsagePlanExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "name", name), + resource.TestCheckResourceAttr(resourceName, "throttle_settings.4173790118.burst_limit", "2"), + resource.TestCheckResourceAttr(resourceName, "throttle_settings.4173790118.rate_limit", "5"), ), }, { Config: testAccAWSApiGatewayUsagePlanThrottlingModifiedConfig(name), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "throttle_settings.1779463053.burst_limit", "3"), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "throttle_settings.1779463053.rate_limit", "6"), + testAccCheckAWSAPIGatewayUsagePlanExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "name", 
name), + resource.TestCheckResourceAttr(resourceName, "throttle_settings.1779463053.burst_limit", "3"), + resource.TestCheckResourceAttr(resourceName, "throttle_settings.1779463053.rate_limit", "6"), ), }, { Config: testAccAWSApiGatewayUsagePlanBasicConfig(name), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name), - resource.TestCheckNoResourceAttr("aws_api_gateway_usage_plan.main", "throttle_settings"), + testAccCheckAWSAPIGatewayUsagePlanExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "name", name), + resource.TestCheckNoResourceAttr(resourceName, "throttle_settings"), ), }, }, @@ -216,6 +218,7 @@ func TestAccAWSAPIGatewayUsagePlan_throttling(t *testing.T) { func TestAccAWSAPIGatewayUsagePlan_throttlingInitialRateLimit(t *testing.T) { var conf apigateway.UsagePlan name := acctest.RandString(10) + resourceName := "aws_api_gateway_usage_plan.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -225,10 +228,15 @@ func TestAccAWSAPIGatewayUsagePlan_throttlingInitialRateLimit(t *testing.T) { { Config: testAccAWSApiGatewayUsagePlanThrottlingConfig(name), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "throttle_settings.4173790118.rate_limit", "5"), + testAccCheckAWSAPIGatewayUsagePlanExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "throttle_settings.4173790118.rate_limit", "5"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -236,6 +244,7 @@ func TestAccAWSAPIGatewayUsagePlan_throttlingInitialRateLimit(t *testing.T) { func TestAccAWSAPIGatewayUsagePlan_quota(t *testing.T) { var conf apigateway.UsagePlan name := acctest.RandString(10) + resourceName := "aws_api_gateway_usage_plan.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -245,37 +254,42 @@ func TestAccAWSAPIGatewayUsagePlan_quota(t *testing.T) { { Config: testAccAWSApiGatewayUsagePlanBasicConfig(name), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name), - resource.TestCheckNoResourceAttr("aws_api_gateway_usage_plan.main", "quota_settings"), + testAccCheckAWSAPIGatewayUsagePlanExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "name", name), + resource.TestCheckNoResourceAttr(resourceName, "quota_settings"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSApiGatewayUsagePlanQuotaConfig(name), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "quota_settings.1956747625.limit", "100"), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "quota_settings.1956747625.offset", "6"), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "quota_settings.1956747625.period", "WEEK"), + testAccCheckAWSAPIGatewayUsagePlanExists(resourceName, &conf), + 
resource.TestCheckResourceAttr(resourceName, "name", name), + resource.TestCheckResourceAttr(resourceName, "quota_settings.1956747625.limit", "100"), + resource.TestCheckResourceAttr(resourceName, "quota_settings.1956747625.offset", "6"), + resource.TestCheckResourceAttr(resourceName, "quota_settings.1956747625.period", "WEEK"), ), }, { Config: testAccAWSApiGatewayUsagePlanQuotaModifiedConfig(name), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "quota_settings.3909168194.limit", "200"), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "quota_settings.3909168194.offset", "20"), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "quota_settings.3909168194.period", "MONTH"), + testAccCheckAWSAPIGatewayUsagePlanExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "name", name), + resource.TestCheckResourceAttr(resourceName, "quota_settings.3909168194.limit", "200"), + resource.TestCheckResourceAttr(resourceName, "quota_settings.3909168194.offset", "20"), + resource.TestCheckResourceAttr(resourceName, "quota_settings.3909168194.period", "MONTH"), ), }, { Config: testAccAWSApiGatewayUsagePlanBasicConfig(name), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name), - resource.TestCheckNoResourceAttr("aws_api_gateway_usage_plan.main", "quota_settings"), + testAccCheckAWSAPIGatewayUsagePlanExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "name", name), + resource.TestCheckNoResourceAttr(resourceName, "quota_settings"), ), }, }, @@ -285,6 +299,7 @@ func TestAccAWSAPIGatewayUsagePlan_quota(t *testing.T) { func TestAccAWSAPIGatewayUsagePlan_apiStages(t *testing.T) { var conf apigateway.UsagePlan name := acctest.RandString(10) + resourceName := "aws_api_gateway_usage_plan.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -296,44 +311,49 @@ func TestAccAWSAPIGatewayUsagePlan_apiStages(t *testing.T) { { Config: testAccAWSApiGatewayUsagePlanApiStagesConfig(name), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "api_stages.0.stage", "test"), + testAccCheckAWSAPIGatewayUsagePlanExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "name", name), + resource.TestCheckResourceAttr(resourceName, "api_stages.0.stage", "test"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, // Handle api stages removal { Config: testAccAWSApiGatewayUsagePlanBasicConfig(name), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name), - resource.TestCheckNoResourceAttr("aws_api_gateway_usage_plan.main", "api_stages"), + testAccCheckAWSAPIGatewayUsagePlanExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "name", name), + resource.TestCheckNoResourceAttr(resourceName, 
"api_stages"), ), }, // Handle api stages additions { Config: testAccAWSApiGatewayUsagePlanApiStagesConfig(name), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "api_stages.0.stage", "test"), + testAccCheckAWSAPIGatewayUsagePlanExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "name", name), + resource.TestCheckResourceAttr(resourceName, "api_stages.0.stage", "test"), ), }, // Handle api stages updates { Config: testAccAWSApiGatewayUsagePlanApiStagesModifiedConfig(name), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "api_stages.0.stage", "foo"), + testAccCheckAWSAPIGatewayUsagePlanExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "name", name), + resource.TestCheckResourceAttr(resourceName, "api_stages.0.stage", "foo"), ), }, { Config: testAccAWSApiGatewayUsagePlanBasicConfig(name), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name), - resource.TestCheckNoResourceAttr("aws_api_gateway_usage_plan.main", "api_stages"), + testAccCheckAWSAPIGatewayUsagePlanExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "name", name), + resource.TestCheckNoResourceAttr(resourceName, "api_stages"), ), }, }, @@ -469,7 +489,7 @@ resource "aws_api_gateway_deployment" "foo" { func testAccAWSApiGatewayUsagePlanBasicConfig(rName string) string { return fmt.Sprintf(testAccAWSAPIGatewayUsagePlanConfig+` -resource "aws_api_gateway_usage_plan" "main" { +resource "aws_api_gateway_usage_plan" "test" { name = "%s" } `, rName) @@ -477,7 +497,7 @@ resource "aws_api_gateway_usage_plan" "main" { func testAccAWSApiGatewayUsagePlanDescriptionConfig(rName string) string { return fmt.Sprintf(testAccAWSAPIGatewayUsagePlanConfig+` -resource "aws_api_gateway_usage_plan" "main" { +resource "aws_api_gateway_usage_plan" "test" { name = "%s" description = "This is a description" } @@ -486,7 +506,7 @@ resource "aws_api_gateway_usage_plan" "main" { func testAccAWSApiGatewayUsagePlanDescriptionUpdatedConfig(rName string) string { return fmt.Sprintf(testAccAWSAPIGatewayUsagePlanConfig+` -resource "aws_api_gateway_usage_plan" "main" { +resource "aws_api_gateway_usage_plan" "test" { name = "%s" description = "This is a new description" } @@ -495,7 +515,7 @@ resource "aws_api_gateway_usage_plan" "main" { func testAccAWSApiGatewayUsagePlanProductCodeConfig(rName string) string { return fmt.Sprintf(testAccAWSAPIGatewayUsagePlanConfig+` -resource "aws_api_gateway_usage_plan" "main" { +resource "aws_api_gateway_usage_plan" "test" { name = "%s" product_code = "MYCODE" } @@ -504,7 +524,7 @@ resource "aws_api_gateway_usage_plan" "main" { func testAccAWSApiGatewayUsagePlanProductCodeUpdatedConfig(rName string) string { return fmt.Sprintf(testAccAWSAPIGatewayUsagePlanConfig+` -resource "aws_api_gateway_usage_plan" "main" { +resource "aws_api_gateway_usage_plan" "test" { name = "%s" product_code = "MYCODE2" } @@ -513,7 +533,7 @@ resource "aws_api_gateway_usage_plan" "main" { 
func testAccAWSApiGatewayUsagePlanBasicUpdatedConfig(rName string) string { return fmt.Sprintf(testAccAWSAPIGatewayUsagePlanConfig+` -resource "aws_api_gateway_usage_plan" "main" { +resource "aws_api_gateway_usage_plan" "test" { name = "%s" } `, rName) @@ -521,7 +541,7 @@ resource "aws_api_gateway_usage_plan" "main" { func testAccAWSApiGatewayUsagePlanThrottlingConfig(rName string) string { return fmt.Sprintf(testAccAWSAPIGatewayUsagePlanConfig+` -resource "aws_api_gateway_usage_plan" "main" { +resource "aws_api_gateway_usage_plan" "test" { name = "%s" throttle_settings { @@ -534,7 +554,7 @@ resource "aws_api_gateway_usage_plan" "main" { func testAccAWSApiGatewayUsagePlanThrottlingModifiedConfig(rName string) string { return fmt.Sprintf(testAccAWSAPIGatewayUsagePlanConfig+` -resource "aws_api_gateway_usage_plan" "main" { +resource "aws_api_gateway_usage_plan" "test" { name = "%s" throttle_settings { @@ -547,7 +567,7 @@ resource "aws_api_gateway_usage_plan" "main" { func testAccAWSApiGatewayUsagePlanQuotaConfig(rName string) string { return fmt.Sprintf(testAccAWSAPIGatewayUsagePlanConfig+` -resource "aws_api_gateway_usage_plan" "main" { +resource "aws_api_gateway_usage_plan" "test" { name = "%s" quota_settings { @@ -561,7 +581,7 @@ resource "aws_api_gateway_usage_plan" "main" { func testAccAWSApiGatewayUsagePlanQuotaModifiedConfig(rName string) string { return fmt.Sprintf(testAccAWSAPIGatewayUsagePlanConfig+` -resource "aws_api_gateway_usage_plan" "main" { +resource "aws_api_gateway_usage_plan" "test" { name = "%s" quota_settings { @@ -575,7 +595,7 @@ resource "aws_api_gateway_usage_plan" "main" { func testAccAWSApiGatewayUsagePlanApiStagesConfig(rName string) string { return fmt.Sprintf(testAccAWSAPIGatewayUsagePlanConfig+` -resource "aws_api_gateway_usage_plan" "main" { +resource "aws_api_gateway_usage_plan" "test" { name = "%s" api_stages { @@ -588,7 +608,7 @@ resource "aws_api_gateway_usage_plan" "main" { func testAccAWSApiGatewayUsagePlanApiStagesModifiedConfig(rName string) string { return fmt.Sprintf(testAccAWSAPIGatewayUsagePlanConfig+` -resource "aws_api_gateway_usage_plan" "main" { +resource "aws_api_gateway_usage_plan" "test" { name = "%s" api_stages { diff --git a/aws/resource_aws_api_gateway_vpc_link_test.go b/aws/resource_aws_api_gateway_vpc_link_test.go index f6c349848d4..b70efd7dc21 100644 --- a/aws/resource_aws_api_gateway_vpc_link_test.go +++ b/aws/resource_aws_api_gateway_vpc_link_test.go @@ -59,35 +59,6 @@ func testSweepAPIGatewayVpcLinks(region string) error { } func TestAccAWSAPIGatewayVpcLink_basic(t *testing.T) { - rName := acctest.RandString(5) - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAwsAPIGatewayVpcLinkDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAPIGatewayVpcLinkConfig(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsAPIGatewayVpcLinkExists("aws_api_gateway_vpc_link.test"), - resource.TestCheckResourceAttr("aws_api_gateway_vpc_link.test", "name", fmt.Sprintf("tf-apigateway-%s", rName)), - resource.TestCheckResourceAttr("aws_api_gateway_vpc_link.test", "description", "test"), - resource.TestCheckResourceAttr("aws_api_gateway_vpc_link.test", "target_arns.#", "1"), - ), - }, - { - Config: testAccAPIGatewayVpcLinkConfig_Update(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsAPIGatewayVpcLinkExists("aws_api_gateway_vpc_link.test"), - resource.TestCheckResourceAttr("aws_api_gateway_vpc_link.test", "name", 
fmt.Sprintf("tf-apigateway-update-%s", rName)), - resource.TestCheckResourceAttr("aws_api_gateway_vpc_link.test", "description", "test update"), - resource.TestCheckResourceAttr("aws_api_gateway_vpc_link.test", "target_arns.#", "1"), - ), - }, - }, - }) -} - -func TestAccAWSAPIGatewayVpcLink_importBasic(t *testing.T) { rName := acctest.RandString(5) resourceName := "aws_api_gateway_vpc_link.test" @@ -98,12 +69,27 @@ func TestAccAWSAPIGatewayVpcLink_importBasic(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccAPIGatewayVpcLinkConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsAPIGatewayVpcLinkExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "name", fmt.Sprintf("tf-apigateway-%s", rName)), + resource.TestCheckResourceAttr(resourceName, "description", "test"), + resource.TestCheckResourceAttr(resourceName, "target_arns.#", "1"), + ), }, { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, }, + { + Config: testAccAPIGatewayVpcLinkConfig_Update(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsAPIGatewayVpcLinkExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "name", fmt.Sprintf("tf-apigateway-update-%s", rName)), + resource.TestCheckResourceAttr(resourceName, "description", "test update"), + resource.TestCheckResourceAttr(resourceName, "target_arns.#", "1"), + ), + }, }, }) } From 19cac871de80a551865fe7df259f54a4d66dd4dd Mon Sep 17 00:00:00 2001 From: DrFaust92 Date: Thu, 24 Oct 2019 12:57:34 +0300 Subject: [PATCH 24/55] add tag support for storage gateway smb file share resource replace arn tests with testAccMatchResourceAttrRegionalARN function --- ...ource_aws_storagegateway_smb_file_share.go | 22 ++++- ..._aws_storagegateway_smb_file_share_test.go | 81 ++++++++++++++++++- 2 files changed, 100 insertions(+), 3 deletions(-) diff --git a/aws/resource_aws_storagegateway_smb_file_share.go b/aws/resource_aws_storagegateway_smb_file_share.go index 16ce66a6dcd..e9b004f1c85 100644 --- a/aws/resource_aws_storagegateway_smb_file_share.go +++ b/aws/resource_aws_storagegateway_smb_file_share.go @@ -10,6 +10,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/helper/validation" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) func resourceAwsStorageGatewaySmbFileShare() *schema.Resource { @@ -47,6 +48,7 @@ func resourceAwsStorageGatewaySmbFileShare() *schema.Resource { Optional: true, Default: "S3_STANDARD", ValidateFunc: validation.StringInSlice([]string{ + "S3_ONEZONE_IA", "S3_STANDARD_IA", "S3_STANDARD", @@ -123,6 +125,7 @@ func resourceAwsStorageGatewaySmbFileShare() *schema.Resource { Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, }, + "tags": tagsSchema(), }, } } @@ -144,6 +147,7 @@ func resourceAwsStorageGatewaySmbFileShareCreate(d *schema.ResourceData, meta in RequesterPays: aws.Bool(d.Get("requester_pays").(bool)), Role: aws.String(d.Get("role_arn").(string)), ValidUserList: expandStringSet(d.Get("valid_user_list").(*schema.Set)), + Tags: keyvaluetags.New(d.Get("tags").(map[string]interface{})).IgnoreAws().StoragegatewayTags(), } if v, ok := d.GetOk("kms_key_arn"); ok && v.(string) != "" { @@ -200,7 +204,8 @@ func resourceAwsStorageGatewaySmbFileShareRead(d *schema.ResourceData, meta inte fileshare := output.SMBFileShareInfoList[0] - d.Set("arn", fileshare.FileShareARN) + arn := fileshare.FileShareARN + 
d.Set("arn", arn) d.Set("authentication", fileshare.Authentication) d.Set("default_storage_class", fileshare.DefaultStorageClass) d.Set("fileshare_id", fileshare.FileShareId) @@ -224,12 +229,27 @@ func resourceAwsStorageGatewaySmbFileShareRead(d *schema.ResourceData, meta inte return fmt.Errorf("error setting valid_user_list: %s", err) } + tags, err := keyvaluetags.StoragegatewayListTags(conn, *arn) + if err != nil { + return fmt.Errorf("error listing tags for resource (%s): %s", *arn, err) + } + if err := d.Set("tags", tags.IgnoreAws().Map()); err != nil { + return fmt.Errorf("error setting tags: %s", err) + } + return nil } func resourceAwsStorageGatewaySmbFileShareUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).storagegatewayconn + if d.HasChange("tags") { + o, n := d.GetChange("tags") + if err := keyvaluetags.StoragegatewayUpdateTags(conn, d.Get("arn").(string), o, n); err != nil { + return fmt.Errorf("error updating tags: %s", err) + } + } + input := &storagegateway.UpdateSMBFileShareInput{ DefaultStorageClass: aws.String(d.Get("default_storage_class").(string)), FileShareARN: aws.String(d.Id()), diff --git a/aws/resource_aws_storagegateway_smb_file_share_test.go b/aws/resource_aws_storagegateway_smb_file_share_test.go index 0fcad891c55..fcde2602673 100644 --- a/aws/resource_aws_storagegateway_smb_file_share_test.go +++ b/aws/resource_aws_storagegateway_smb_file_share_test.go @@ -26,7 +26,7 @@ func TestAccAWSStorageGatewaySmbFileShare_Authentication_ActiveDirectory(t *test Config: testAccAWSStorageGatewaySmbFileShareConfig_Authentication_ActiveDirectory(rName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSStorageGatewaySmbFileShareExists(resourceName, &smbFileShare), - resource.TestMatchResourceAttr(resourceName, "arn", regexp.MustCompile(`^arn:[^:]+:storagegateway:[^:]+:[^:]+:share/share-.+$`)), + testAccMatchResourceAttrRegionalARN(resourceName, "arn", "storagegateway", regexp.MustCompile(`share/share-.+`)), resource.TestCheckResourceAttr(resourceName, "authentication", "ActiveDirectory"), resource.TestCheckResourceAttr(resourceName, "default_storage_class", "S3_STANDARD"), resource.TestMatchResourceAttr(resourceName, "fileshare_id", regexp.MustCompile(`^share-`)), @@ -66,7 +66,7 @@ func TestAccAWSStorageGatewaySmbFileShare_Authentication_GuestAccess(t *testing. 
Config: testAccAWSStorageGatewaySmbFileShareConfig_Authentication_GuestAccess(rName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSStorageGatewaySmbFileShareExists(resourceName, &smbFileShare), - resource.TestMatchResourceAttr(resourceName, "arn", regexp.MustCompile(`^arn:[^:]+:storagegateway:[^:]+:[^:]+:share/share-.+$`)), + testAccMatchResourceAttrRegionalARN(resourceName, "arn", "storagegateway", regexp.MustCompile(`share/share-.+`)), resource.TestCheckResourceAttr(resourceName, "authentication", "GuestAccess"), resource.TestCheckResourceAttr(resourceName, "default_storage_class", "S3_STANDARD"), resource.TestMatchResourceAttr(resourceName, "fileshare_id", regexp.MustCompile(`^share-`)), @@ -125,6 +125,50 @@ func TestAccAWSStorageGatewaySmbFileShare_DefaultStorageClass(t *testing.T) { }) } +func TestAccAWSStorageGatewaySmbFileShare_Tags(t *testing.T) { + var smbFileShare storagegateway.SMBFileShareInfo + rName := acctest.RandomWithPrefix("tf-acc-test") + resourceName := "aws_storagegateway_smb_file_share.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSStorageGatewaySmbFileShareDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAWSStorageGatewaySmbFileShareConfigTags1(rName, "key1", "value1"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSStorageGatewaySmbFileShareExists(resourceName, &smbFileShare), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAWSStorageGatewaySmbFileShareConfigTags2(rName, "key1", "value1updated", "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSStorageGatewaySmbFileShareExists(resourceName, &smbFileShare), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + { + Config: testAccAWSStorageGatewaySmbFileShareConfigTags1(rName, "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSStorageGatewaySmbFileShareExists(resourceName, &smbFileShare), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + }, + }) +} + func TestAccAWSStorageGatewaySmbFileShare_GuessMIMETypeEnabled(t *testing.T) { var smbFileShare storagegateway.SMBFileShareInfo rName := acctest.RandomWithPrefix("tf-acc-test") @@ -758,3 +802,36 @@ resource "aws_storagegateway_smb_file_share" "test" { } `, validUser1, validUser2) } + +func testAccAWSStorageGatewaySmbFileShareConfigTags1(rName, tagKey1, tagValue1 string) string { + return testAccAWSStorageGateway_SmbFileShare_GuestAccessBase(rName) + fmt.Sprintf(` +resource "aws_storagegateway_smb_file_share" "test" { + # Use GuestAccess to simplify testing + authentication = "GuestAccess" + gateway_arn = "${aws_storagegateway_gateway.test.arn}" + location_arn = "${aws_s3_bucket.test.arn}" + role_arn = "${aws_iam_role.test.arn}" + + tags = { + %q = %q + } +} +`, tagKey1, tagValue1) +} + +func testAccAWSStorageGatewaySmbFileShareConfigTags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { + return testAccAWSStorageGateway_SmbFileShare_GuestAccessBase(rName) + fmt.Sprintf(` +resource 
"aws_storagegateway_smb_file_share" "test" { + # Use GuestAccess to simplify testing + authentication = "GuestAccess" + gateway_arn = "${aws_storagegateway_gateway.test.arn}" + location_arn = "${aws_s3_bucket.test.arn}" + role_arn = "${aws_iam_role.test.arn}" + + tags = { + %q = %q + %q = %q + } +} +`, tagKey1, tagValue1, tagKey2, tagValue2) +} From f566b7b3e38f2181f187c132a232b5176baf1dea Mon Sep 17 00:00:00 2001 From: DrFaust92 Date: Thu, 24 Oct 2019 17:04:36 +0300 Subject: [PATCH 25/55] add docs --- website/docs/r/storagegateway_smb_file_share.html.markdown | 1 + 1 file changed, 1 insertion(+) diff --git a/website/docs/r/storagegateway_smb_file_share.html.markdown b/website/docs/r/storagegateway_smb_file_share.html.markdown index f7beaf10d2a..f0d32c750cd 100644 --- a/website/docs/r/storagegateway_smb_file_share.html.markdown +++ b/website/docs/r/storagegateway_smb_file_share.html.markdown @@ -55,6 +55,7 @@ The following arguments are supported: * `read_only` - (Optional) Boolean to indicate write status of file share. File share does not accept writes if `true`. Defaults to `false`. * `requester_pays` - (Optional) Boolean who pays the cost of the request and the data download from the Amazon S3 bucket. Set this value to `true` if you want the requester to pay instead of the bucket owner. Defaults to `false`. * `valid_user_list` - (Optional) A list of users in the Active Directory that are allowed to access the file share. Only valid if `authentication` is set to `ActiveDirectory`. +* `tags` - (Optional) Key-value mapping of resource tags ### smb_file_share_defaults From 1585c6781c37eb53b1bda4e6e54f47e785e95b83 Mon Sep 17 00:00:00 2001 From: Ilia Lazebnik Date: Thu, 24 Oct 2019 17:08:43 +0300 Subject: [PATCH 26/55] Update resource_aws_storagegateway_smb_file_share.go --- aws/resource_aws_storagegateway_smb_file_share.go | 1 - 1 file changed, 1 deletion(-) diff --git a/aws/resource_aws_storagegateway_smb_file_share.go b/aws/resource_aws_storagegateway_smb_file_share.go index e9b004f1c85..d4512b156fc 100644 --- a/aws/resource_aws_storagegateway_smb_file_share.go +++ b/aws/resource_aws_storagegateway_smb_file_share.go @@ -48,7 +48,6 @@ func resourceAwsStorageGatewaySmbFileShare() *schema.Resource { Optional: true, Default: "S3_STANDARD", ValidateFunc: validation.StringInSlice([]string{ - "S3_ONEZONE_IA", "S3_STANDARD_IA", "S3_STANDARD", From 8023ea7bedcc33e4b9cd90c1596942b2e0948f1c Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 25 Oct 2019 16:04:41 -0400 Subject: [PATCH 27/55] r/aws_backup_plan: Intermediate commit after acceptance test tidy and before testing 'recovery_point_tags' attribute. 
--- aws/resource_aws_backup_plan.go | 170 ++++++------ aws/resource_aws_backup_plan_test.go | 393 +++++++++++++++------------ 2 files changed, 301 insertions(+), 262 deletions(-) diff --git a/aws/resource_aws_backup_plan.go b/aws/resource_aws_backup_plan.go index bf6d542457e..51e6a83d3ed 100644 --- a/aws/resource_aws_backup_plan.go +++ b/aws/resource_aws_backup_plan.go @@ -68,14 +68,10 @@ func resourceAwsBackupPlan() *schema.Resource { }, }, }, - "recovery_point_tags": { - Type: schema.TypeMap, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, + "recovery_point_tags": tagsSchema(), }, }, - Set: resourceAwsPlanRuleHash, + Set: backupBackuPlanHash, }, "arn": { Type: schema.TypeString, @@ -93,28 +89,23 @@ func resourceAwsBackupPlan() *schema.Resource { func resourceAwsBackupPlanCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).backupconn - plan := &backup.PlanInput{ - BackupPlanName: aws.String(d.Get("name").(string)), - } - - rules := expandBackupPlanRules(d.Get("rule").(*schema.Set).List()) - - plan.Rules = rules - input := &backup.CreateBackupPlanInput{ - BackupPlan: plan, + BackupPlan: &backup.PlanInput{ + BackupPlanName: aws.String(d.Get("name").(string)), + Rules: expandBackupPlanRules(d.Get("rule").(*schema.Set)), + }, } - if v, ok := d.GetOk("tags"); ok { input.BackupPlanTags = tagsFromMapGeneric(v.(map[string]interface{})) } + log.Printf("[DEBUG] Creating Backup Plan: %#v", input) resp, err := conn.CreateBackupPlan(input) if err != nil { return fmt.Errorf("error creating Backup Plan: %s", err) } - d.SetId(*resp.BackupPlanId) + d.SetId(aws.StringValue(resp.BackupPlanId)) return resourceAwsBackupPlanRead(d, meta) } @@ -122,43 +113,20 @@ func resourceAwsBackupPlanCreate(d *schema.ResourceData, meta interface{}) error func resourceAwsBackupPlanRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).backupconn - input := &backup.GetBackupPlanInput{ + resp, err := conn.GetBackupPlan(&backup.GetBackupPlanInput{ BackupPlanId: aws.String(d.Id()), - } - - resp, err := conn.GetBackupPlan(input) + }) if isAWSErr(err, backup.ErrCodeResourceNotFoundException, "") { log.Printf("[WARN] Backup Plan (%s) not found, removing from state", d.Id()) d.SetId("") return nil } - if err != nil { - return fmt.Errorf("error reading Backup Plan: %s", err) + return fmt.Errorf("error reading Backup Plan (%s): %s", d.Id(), err) } - rule := &schema.Set{F: resourceAwsPlanRuleHash} - - for _, r := range resp.BackupPlan.Rules { - m := make(map[string]interface{}) - - m["completion_window"] = aws.Int64Value(r.CompletionWindowMinutes) - m["recovery_point_tags"] = aws.StringValueMap(r.RecoveryPointTags) - m["rule_name"] = aws.StringValue(r.RuleName) - m["schedule"] = aws.StringValue(r.ScheduleExpression) - m["start_window"] = aws.Int64Value(r.StartWindowMinutes) - m["target_vault_name"] = aws.StringValue(r.TargetBackupVaultName) - - if r.Lifecycle != nil { - l := make(map[string]interface{}) - l["delete_after"] = aws.Int64Value(r.Lifecycle.DeleteAfterDays) - l["cold_storage_after"] = aws.Int64Value(r.Lifecycle.MoveToColdStorageAfterDays) - m["lifecycle"] = []interface{}{l} - } - - rule.Add(m) - } - if err := d.Set("rule", rule); err != nil { + d.Set("name", resp.BackupPlan.BackupPlanName) + if err := d.Set("rule", flattenBackupPlanRules(resp.BackupPlan.Rules)); err != nil { return fmt.Errorf("error setting rule: %s", err) } @@ -182,22 +150,18 @@ func resourceAwsBackupPlanRead(d *schema.ResourceData, meta interface{}) error { func 
resourceAwsBackupPlanUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).backupconn - plan := &backup.PlanInput{ - BackupPlanName: aws.String(d.Get("name").(string)), - } - - rules := expandBackupPlanRules(d.Get("rule").(*schema.Set).List()) - - plan.Rules = rules - input := &backup.UpdateBackupPlanInput{ BackupPlanId: aws.String(d.Id()), - BackupPlan: plan, + BackupPlan: &backup.PlanInput{ + BackupPlanName: aws.String(d.Get("name").(string)), + Rules: expandBackupPlanRules(d.Get("rule").(*schema.Set)), + }, } + log.Printf("[DEBUG] Updating Backup Plan: %#v", input) _, err := conn.UpdateBackupPlan(input) if err != nil { - return fmt.Errorf("error updating Backup Plan: %s", err) + return fmt.Errorf("error updating Backup Plan (%s): %s", d.Id(), err) } if d.HasChange("tags") { @@ -248,66 +212,62 @@ func resourceAwsBackupPlanUpdate(d *schema.ResourceData, meta interface{}) error func resourceAwsBackupPlanDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).backupconn - input := &backup.DeleteBackupPlanInput{ + log.Printf("[DEBUG] Deleting Backup Plan: %s", d.Id()) + _, err := conn.DeleteBackupPlan(&backup.DeleteBackupPlanInput{ BackupPlanId: aws.String(d.Id()), - } - - _, err := conn.DeleteBackupPlan(input) + }) if isAWSErr(err, backup.ErrCodeResourceNotFoundException, "") { return nil } if err != nil { - return fmt.Errorf("error deleting Backup Plan: %s", err) + return fmt.Errorf("error deleting Backup Plan (%s): %s", d.Id(), err) } return nil } -func expandBackupPlanRules(l []interface{}) []*backup.RuleInput { +func expandBackupPlanRules(vRules *schema.Set) []*backup.RuleInput { rules := []*backup.RuleInput{} - for _, i := range l { - item := i.(map[string]interface{}) + for _, vRule := range vRules.List() { rule := &backup.RuleInput{} - if item["rule_name"] != "" { - rule.RuleName = aws.String(item["rule_name"].(string)) + mRule := vRule.(map[string]interface{}) + + if vRuleName, ok := mRule["rule_name"].(string); ok && vRuleName != "" { + rule.RuleName = aws.String(vRuleName) } - if item["target_vault_name"] != "" { - rule.TargetBackupVaultName = aws.String(item["target_vault_name"].(string)) + if vTargetVaultName, ok := mRule["target_vault_name"].(string); ok && vTargetVaultName != "" { + rule.TargetBackupVaultName = aws.String(vTargetVaultName) } - if item["schedule"] != "" { - rule.ScheduleExpression = aws.String(item["schedule"].(string)) + if vSchedule, ok := mRule["schedule"].(string); ok && vSchedule != "" { + rule.ScheduleExpression = aws.String(vSchedule) } - if item["start_window"] != nil { - rule.StartWindowMinutes = aws.Int64(int64(item["start_window"].(int))) + if vStartWindow, ok := mRule["start_window"].(int); ok { + rule.StartWindowMinutes = aws.Int64(int64(vStartWindow)) } - if item["completion_window"] != nil { - rule.CompletionWindowMinutes = aws.Int64(int64(item["completion_window"].(int))) + if vCompletionWindow, ok := mRule["completion_window"].(int); ok { + rule.CompletionWindowMinutes = aws.Int64(int64(vCompletionWindow)) } - if item["recovery_point_tags"] != nil { - rule.RecoveryPointTags = tagsFromMapGeneric(item["recovery_point_tags"].(map[string]interface{})) + if vRecoveryPointTags, ok := mRule["recovery_point_tags"].(map[string]interface{}); ok && len(vRecoveryPointTags) > 0 { + rule.RecoveryPointTags = tagsFromMapGeneric(vRecoveryPointTags) } - var lifecycle map[string]interface{} - if i.(map[string]interface{})["lifecycle"] != nil { - lifecycleRaw := 
i.(map[string]interface{})["lifecycle"].([]interface{}) - if len(lifecycleRaw) == 1 { - lifecycle = lifecycleRaw[0].(map[string]interface{}) - lcValues := &backup.Lifecycle{} - - if v, ok := lifecycle["delete_after"]; ok && v.(int) > 0 { - lcValues.DeleteAfterDays = aws.Int64(int64(v.(int))) - } - - if v, ok := lifecycle["cold_storage_after"]; ok && v.(int) > 0 { - lcValues.MoveToColdStorageAfterDays = aws.Int64(int64(v.(int))) - } - rule.Lifecycle = lcValues + if vLifecycle, ok := mRule["lifecycle"].([]interface{}); ok && len(vLifecycle) > 0 && vLifecycle[0] != nil { + lifecycle := &backup.Lifecycle{} + + mLifecycle := vLifecycle[0].(map[string]interface{}) + + if vDeleteAfter, ok := mLifecycle["delete_after"].(int); ok && vDeleteAfter > 0 { + lifecycle.DeleteAfterDays = aws.Int64(int64(vDeleteAfter)) + } + if vColdStorageAfter, ok := mLifecycle["cold_storage_after"].(int); ok && vColdStorageAfter > 0 { + lifecycle.MoveToColdStorageAfterDays = aws.Int64(int64(vColdStorageAfter)) } + rule.Lifecycle = lifecycle } rules = append(rules, rule) @@ -316,7 +276,35 @@ func expandBackupPlanRules(l []interface{}) []*backup.RuleInput { return rules } -func resourceAwsPlanRuleHash(v interface{}) int { +func flattenBackupPlanRules(rules []*backup.Rule) *schema.Set { + vRules := []interface{}{} + + for _, rule := range rules { + mRule := map[string]interface{}{ + "rule_name": aws.StringValue(rule.RuleName), + "target_vault_name": aws.StringValue(rule.TargetBackupVaultName), + "schedule": aws.StringValue(rule.ScheduleExpression), + "start_window": int(aws.Int64Value(rule.StartWindowMinutes)), + "completion_window": int(aws.Int64Value(rule.CompletionWindowMinutes)), + "recovery_point_tags": tagsToMapGeneric(rule.RecoveryPointTags), + } + + if lifecycle := rule.Lifecycle; lifecycle != nil { + mRule["lifecycle"] = []interface{}{ + map[string]interface{}{ + "delete_after": int(aws.Int64Value(lifecycle.DeleteAfterDays)), + "cold_storage_after": int(aws.Int64Value(lifecycle.MoveToColdStorageAfterDays)), + }, + } + } + + vRules = append(vRules, mRule) + } + + return schema.NewSet(backupBackuPlanHash, vRules) +} + +func backupBackuPlanHash(v interface{}) int { var buf bytes.Buffer m := v.(map[string]interface{}) diff --git a/aws/resource_aws_backup_plan_test.go b/aws/resource_aws_backup_plan_test.go index 9fd797bd852..2be7279e5d3 100644 --- a/aws/resource_aws_backup_plan_test.go +++ b/aws/resource_aws_backup_plan_test.go @@ -14,7 +14,9 @@ import ( func TestAccAwsBackupPlan_basic(t *testing.T) { var plan backup.GetBackupPlanOutput - rInt := acctest.RandInt() + ruleNameMap := map[string]string{} + resourceName := "aws_backup_plan.test" + rName := fmt.Sprintf("tf-testacc-backup-%s", acctest.RandStringFromCharSet(14, acctest.CharSetAlphaNum)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSBackup(t) }, @@ -22,13 +24,18 @@ func TestAccAwsBackupPlan_basic(t *testing.T) { CheckDestroy: testAccCheckAwsBackupPlanDestroy, Steps: []resource.TestStep{ { - Config: testAccBackupPlanConfig(rInt), + Config: testAccAwsBackupPlanConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAwsBackupPlanExists("aws_backup_plan.test", &plan), - testAccMatchResourceAttrRegionalARN("aws_backup_plan.test", "arn", "backup", regexp.MustCompile(`backup-plan:.+`)), - resource.TestCheckResourceAttrSet("aws_backup_plan.test", "version"), - resource.TestCheckResourceAttr("aws_backup_plan.test", "rule.#", "1"), - resource.TestCheckNoResourceAttr("aws_backup_plan.test", 
"rule.712706565.lifecycle.#"), + testAccCheckAwsBackupPlanExists(resourceName, &plan, &ruleNameMap), + testAccMatchResourceAttrRegionalARN(resourceName, "arn", "backup", regexp.MustCompile(`backup-plan:.+`)), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "rule_name", rName), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "target_vault_name", rName), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "schedule", "cron(0 12 * * ? *)"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "lifecycle.#", "0"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttrSet(resourceName, "version"), ), }, }, @@ -37,7 +44,9 @@ func TestAccAwsBackupPlan_basic(t *testing.T) { func TestAccAwsBackupPlan_withTags(t *testing.T) { var plan backup.GetBackupPlanOutput - rInt := acctest.RandInt() + ruleNameMap := map[string]string{} + resourceName := "aws_backup_plan.test" + rName := fmt.Sprintf("tf-testacc-backup-%s", acctest.RandStringFromCharSet(14, acctest.CharSetAlphaNum)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSBackup(t) }, @@ -45,28 +54,36 @@ func TestAccAwsBackupPlan_withTags(t *testing.T) { CheckDestroy: testAccCheckAwsBackupPlanDestroy, Steps: []resource.TestStep{ { - Config: testAccBackupPlanWithTag(rInt), + Config: testAccAwsBackupPlanConfig_tags(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAwsBackupPlanExists("aws_backup_plan.test", &plan), - resource.TestCheckResourceAttr("aws_backup_plan.test", "tags.%", "1"), - resource.TestCheckResourceAttr("aws_backup_plan.test", "tags.env", "test"), + testAccCheckAwsBackupPlanExists(resourceName, &plan, &ruleNameMap), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "3"), + resource.TestCheckResourceAttr(resourceName, "tags.Name", rName), + resource.TestCheckResourceAttr(resourceName, "tags.Key1", "Value1"), + resource.TestCheckResourceAttr(resourceName, "tags.Key2", "Value2a"), ), }, { - Config: testAccBackupPlanWithTags(rInt), + Config: testAccAwsBackupPlanConfig_tagsUpdated(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAwsBackupPlanExists("aws_backup_plan.test", &plan), - resource.TestCheckResourceAttr("aws_backup_plan.test", "tags.%", "2"), - resource.TestCheckResourceAttr("aws_backup_plan.test", "tags.env", "test"), - resource.TestCheckResourceAttr("aws_backup_plan.test", "tags.app", "widget"), + testAccCheckAwsBackupPlanExists(resourceName, &plan, &ruleNameMap), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "3"), + resource.TestCheckResourceAttr(resourceName, "tags.Name", rName), + resource.TestCheckResourceAttr(resourceName, "tags.Key2", "Value2b"), + resource.TestCheckResourceAttr(resourceName, "tags.Key3", "Value3"), ), }, { - Config: testAccBackupPlanWithTag(rInt), + Config: testAccAwsBackupPlanConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAwsBackupPlanExists("aws_backup_plan.test", &plan), - resource.TestCheckResourceAttr("aws_backup_plan.test", "tags.%", "1"), - resource.TestCheckResourceAttr("aws_backup_plan.test", 
"tags.env", "test"), + testAccCheckAwsBackupPlanExists(resourceName, &plan, &ruleNameMap), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), }, }, @@ -75,7 +92,12 @@ func TestAccAwsBackupPlan_withTags(t *testing.T) { func TestAccAwsBackupPlan_withRules(t *testing.T) { var plan backup.GetBackupPlanOutput - rInt := acctest.RandInt() + ruleNameMap := map[string]string{} + resourceName := "aws_backup_plan.test" + rName := fmt.Sprintf("tf-testacc-backup-%s", acctest.RandStringFromCharSet(14, acctest.CharSetAlphaNum)) + rule1Name := fmt.Sprintf("%s_1", rName) + rule2Name := fmt.Sprintf("%s_2", rName) + rule3Name := fmt.Sprintf("%s_3", rName) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSBackup(t) }, @@ -83,46 +105,65 @@ func TestAccAwsBackupPlan_withRules(t *testing.T) { CheckDestroy: testAccCheckAwsBackupPlanDestroy, Steps: []resource.TestStep{ { - Config: testAccBackupPlanWithRules(rInt), + Config: testAccAwsBackupPlanConfig_twoRules(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAwsBackupPlanExists("aws_backup_plan.test", &plan), - resource.TestCheckResourceAttr("aws_backup_plan.test", "rule.#", "2"), + testAccCheckAwsBackupPlanExists(resourceName, &plan, &ruleNameMap), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "rule.#", "2"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule1Name, "rule_name", rule1Name), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule1Name, "target_vault_name", rName), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule1Name, "schedule", "cron(0 12 * * ? *)"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule1Name, "lifecycle.#", "0"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule2Name, "rule_name", rule2Name), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule2Name, "target_vault_name", rName), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule2Name, "schedule", "cron(0 6 * * ? 
*)"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule2Name, "lifecycle.#", "0"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), }, - }, - }) -} - -func TestAccAwsBackupPlan_withRuleRemove(t *testing.T) { - var plan backup.GetBackupPlanOutput - rInt := acctest.RandInt() - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSBackup(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAwsBackupPlanDestroy, - Steps: []resource.TestStep{ { - Config: testAccBackupPlanWithRules(rInt), + Config: testAccAwsBackupPlanConfig_threeRules(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAwsBackupPlanExists("aws_backup_plan.test", &plan), - resource.TestCheckResourceAttr("aws_backup_plan.test", "rule.#", "2"), + testAccCheckAwsBackupPlanExists(resourceName, &plan, &ruleNameMap), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "rule.#", "3"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule1Name, "rule_name", rule1Name), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule1Name, "target_vault_name", rName), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule1Name, "schedule", "cron(0 6 * * ? *)"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule1Name, "lifecycle.#", "0"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule2Name, "rule_name", rule2Name), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule2Name, "target_vault_name", rName), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule2Name, "schedule", "cron(0 12 * * ? *)"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule2Name, "lifecycle.#", "0"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule3Name, "rule_name", rule3Name), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule3Name, "target_vault_name", rName), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule3Name, "schedule", "cron(0 18 * * ? *)"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule3Name, "lifecycle.#", "0"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), }, { - Config: testAccBackupPlanConfig(rInt), + Config: testAccAwsBackupPlanConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAwsBackupPlanExists("aws_backup_plan.test", &plan), - resource.TestCheckResourceAttr("aws_backup_plan.test", "rule.#", "1"), + testAccCheckAwsBackupPlanExists(resourceName, &plan, &ruleNameMap), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "rule_name", rName), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "target_vault_name", rName), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "schedule", "cron(0 12 * * ? 
*)"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "lifecycle.#", "0"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), }, }, }) } -func TestAccAwsBackupPlan_withRuleAdd(t *testing.T) { +func TestAccAwsBackupPlan_withLifecycle(t *testing.T) { var plan backup.GetBackupPlanOutput - rInt := acctest.RandInt() + ruleNameMap := map[string]string{} + resourceName := "aws_backup_plan.test" + rName := fmt.Sprintf("tf-testacc-backup-%s", acctest.RandStringFromCharSet(14, acctest.CharSetAlphaNum)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSBackup(t) }, @@ -130,81 +171,49 @@ func TestAccAwsBackupPlan_withRuleAdd(t *testing.T) { CheckDestroy: testAccCheckAwsBackupPlanDestroy, Steps: []resource.TestStep{ { - Config: testAccBackupPlanConfig(rInt), + Config: testAccAwsBackupPlanConfig_lifecycleColdStorageAfterOnly(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAwsBackupPlanExists("aws_backup_plan.test", &plan), - resource.TestCheckResourceAttr("aws_backup_plan.test", "rule.#", "1"), + testAccCheckAwsBackupPlanExists(resourceName, &plan, &ruleNameMap), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "rule_name", rName), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "lifecycle.#", "1"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "lifecycle.0.cold_storage_after", "7"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "lifecycle.0.delete_after", "0"), ), }, { - Config: testAccBackupPlanWithRules(rInt), - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsBackupPlanExists("aws_backup_plan.test", &plan), - resource.TestCheckResourceAttr("aws_backup_plan.test", "rule.#", "2"), - ), - }, - }, - }) -} - -func TestAccAwsBackupPlan_withLifecycle(t *testing.T) { - var plan backup.GetBackupPlanOutput - rStr := "lifecycle_policy" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSBackup(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAwsBackupPlanDestroy, - Steps: []resource.TestStep{ - { - Config: testAccBackupPlanWithLifecycle(rStr), + Config: testAccAwsBackupPlanConfig_lifecycleDeleteAfterOnly(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAwsBackupPlanExists("aws_backup_plan.test", &plan), - resource.TestCheckResourceAttr("aws_backup_plan.test", "rule.1028372010.lifecycle.#", "1"), + testAccCheckAwsBackupPlanExists(resourceName, &plan, &ruleNameMap), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "rule_name", rName), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "lifecycle.#", "1"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "lifecycle.0.cold_storage_after", "0"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "lifecycle.0.delete_after", "120"), ), }, - }, - }) -} - -func TestAccAwsBackupPlan_withLifecycleDeleteAfterOnly(t *testing.T) { - var plan backup.GetBackupPlanOutput - rStr := "lifecycle_policy_two" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSBackup(t) }, - Providers: testAccProviders, - CheckDestroy: 
testAccCheckAwsBackupPlanDestroy, - Steps: []resource.TestStep{ { - Config: testAccBackupPlanWithLifecycleDeleteAfterOnly(rStr), + Config: testAccAwsBackupPlanConfig_lifecycleColdStorageAfterAndDeleteAfter(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAwsBackupPlanExists("aws_backup_plan.test", &plan), - resource.TestCheckResourceAttr("aws_backup_plan.test", "rule.2156287050.lifecycle.#", "1"), - resource.TestCheckResourceAttr("aws_backup_plan.test", "rule.2156287050.lifecycle.0.delete_after", "7"), - resource.TestCheckResourceAttr("aws_backup_plan.test", "rule.2156287050.lifecycle.0.cold_storage_after", "0"), + testAccCheckAwsBackupPlanExists(resourceName, &plan, &ruleNameMap), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "rule_name", rName), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "lifecycle.#", "1"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "lifecycle.0.cold_storage_after", "30"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "lifecycle.0.delete_after", "180"), ), }, - }, - }) -} - -func TestAccAwsBackupPlan_withLifecycleColdStorageAfterOnly(t *testing.T) { - var plan backup.GetBackupPlanOutput - rStr := "lifecycle_policy_three" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSBackup(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAwsBackupPlanDestroy, - Steps: []resource.TestStep{ { - Config: testAccBackupPlanWithLifecycleColdStorageAfterOnly(rStr), + Config: testAccAwsBackupPlanConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAwsBackupPlanExists("aws_backup_plan.test", &plan), - resource.TestCheckResourceAttr("aws_backup_plan.test", "rule.1300859512.lifecycle.#", "1"), - resource.TestCheckResourceAttr("aws_backup_plan.test", "rule.1300859512.lifecycle.0.delete_after", "0"), - resource.TestCheckResourceAttr("aws_backup_plan.test", "rule.1300859512.lifecycle.0.cold_storage_after", "7"), + testAccCheckAwsBackupPlanExists(resourceName, &plan, &ruleNameMap), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "rule_name", rName), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "lifecycle.#", "0"), ), }, }, @@ -213,7 +222,9 @@ func TestAccAwsBackupPlan_withLifecycleColdStorageAfterOnly(t *testing.T) { func TestAccAwsBackupPlan_disappears(t *testing.T) { var plan backup.GetBackupPlanOutput - rInt := acctest.RandInt() + ruleNameMap := map[string]string{} + resourceName := "aws_backup_plan.test" + rName := fmt.Sprintf("tf-testacc-backup-%s", acctest.RandStringFromCharSet(14, acctest.CharSetAlphaNum)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSBackup(t) }, @@ -221,9 +232,9 @@ func TestAccAwsBackupPlan_disappears(t *testing.T) { CheckDestroy: testAccCheckAwsBackupPlanDestroy, Steps: []resource.TestStep{ { - Config: testAccBackupPlanConfig(rInt), + Config: testAccAwsBackupPlanConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAwsBackupPlanExists("aws_backup_plan.test", &plan), + testAccCheckAwsBackupPlanExists(resourceName, &plan, &ruleNameMap), testAccCheckAwsBackupPlanDisappears(&plan), ), 
ExpectNonEmptyPlan: true, @@ -269,186 +280,226 @@ func testAccCheckAwsBackupPlanDisappears(backupPlan *backup.GetBackupPlanOutput) } } -func testAccCheckAwsBackupPlanExists(name string, plan *backup.GetBackupPlanOutput) resource.TestCheckFunc { +func testAccCheckAwsBackupPlanExists(name string, plan *backup.GetBackupPlanOutput, ruleNameMap *map[string]string) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] + conn := testAccProvider.Meta().(*AWSClient).backupconn + rs, ok := s.RootModule().Resources[name] if !ok { return fmt.Errorf("Not found: %s", name) } - if rs.Primary.ID == "" { - return fmt.Errorf("Resource ID is not set") + return fmt.Errorf("No ID is set") } - conn := testAccProvider.Meta().(*AWSClient).backupconn - - input := &backup.GetBackupPlanInput{ + output, err := conn.GetBackupPlan(&backup.GetBackupPlanInput{ BackupPlanId: aws.String(rs.Primary.ID), - } - - output, err := conn.GetBackupPlan(input) - + }) if err != nil { return err } *plan = *output + // Build map of rule name to hash value. + re := regexp.MustCompile(`^rule\.(\d+)\.rule_name$`) + for k, v := range rs.Primary.Attributes { + matches := re.FindStringSubmatch(k) + if matches != nil { + (*ruleNameMap)[v] = matches[1] + } + } + return nil } } -func testAccBackupPlanConfig(randInt int) string { +func testAccCheckAwsBackupPlanRuleAttr(name string, ruleNameMap *map[string]string, ruleName, key, value string) resource.TestCheckFunc { + return func(s *terraform.State) error { + return resource.TestCheckResourceAttr(name, fmt.Sprintf("rule.%s.%s", (*ruleNameMap)[ruleName], key), value)(s) + } +} + +func testAccAwsBackupPlanConfig_basic(rName string) string { return fmt.Sprintf(` resource "aws_backup_vault" "test" { - name = "tf_acc_test_backup_vault_%[1]d" + name = %[1]q } resource "aws_backup_plan" "test" { - name = "tf_acc_test_backup_plan_%[1]d" + name = %[1]q rule { - rule_name = "tf_acc_test_backup_rule_%[1]d" + rule_name = %[1]q target_vault_name = "${aws_backup_vault.test.name}" schedule = "cron(0 12 * * ? *)" } } -`, randInt) +`, rName) } -func testAccBackupPlanWithTag(randInt int) string { +func testAccAwsBackupPlanConfig_tags(rName string) string { return fmt.Sprintf(` resource "aws_backup_vault" "test" { - name = "tf_acc_test_backup_vault_%[1]d" + name = %[1]q } resource "aws_backup_plan" "test" { - name = "tf_acc_test_backup_plan_%[1]d" + name = %[1]q rule { - rule_name = "tf_acc_test_backup_rule_%[1]d" + rule_name = %[1]q target_vault_name = "${aws_backup_vault.test.name}" schedule = "cron(0 12 * * ? *)" } tags = { - env = "test" + Name = %[1]q + Key1 = "Value1" + Key2 = "Value2a" } } -`, randInt) +`, rName) } -func testAccBackupPlanWithTags(randInt int) string { +func testAccAwsBackupPlanConfig_tagsUpdated(rName string) string { return fmt.Sprintf(` resource "aws_backup_vault" "test" { - name = "tf_acc_test_backup_vault_%[1]d" + name = %[1]q } resource "aws_backup_plan" "test" { - name = "tf_acc_test_backup_plan_%[1]d" + name = %[1]q rule { - rule_name = "tf_acc_test_backup_rule_%[1]d" + rule_name = %[1]q target_vault_name = "${aws_backup_vault.test.name}" schedule = "cron(0 12 * * ? 
*)" } tags = { - env = "test" - app = "widget" + Name = %[1]q + Key2 = "Value2b" + Key3 = "Value3" } } -`, randInt) +`, rName) } -func testAccBackupPlanWithLifecycle(stringID string) string { +func testAccAwsBackupPlanConfig_twoRules(rName string) string { return fmt.Sprintf(` resource "aws_backup_vault" "test" { - name = "tf_acc_test_backup_vault_%[1]s" + name = %[1]q } resource "aws_backup_plan" "test" { - name = "tf_acc_test_backup_plan_%[1]s" + name = %[1]q rule { - rule_name = "tf_acc_test_backup_rule_%[1]s" + rule_name = "%[1]s_1" target_vault_name = "${aws_backup_vault.test.name}" schedule = "cron(0 12 * * ? *)" + } + rule { + rule_name = "%[1]s_2" + target_vault_name = "${aws_backup_vault.test.name}" + schedule = "cron(0 6 * * ? *)" + } +} +`, rName) +} - lifecycle { - cold_storage_after = 30 - delete_after = 160 - } +func testAccAwsBackupPlanConfig_threeRules(rName string) string { + return fmt.Sprintf(` +resource "aws_backup_vault" "test" { + name = %[1]q +} + +resource "aws_backup_plan" "test" { + name = %[1]q + + rule { + rule_name = "%[1]s_1" + target_vault_name = "${aws_backup_vault.test.name}" + schedule = "cron(0 6 * * ? *)" + } + rule { + rule_name = "%[1]s_2" + target_vault_name = "${aws_backup_vault.test.name}" + schedule = "cron(0 12 * * ? *)" + } + rule { + rule_name = "%[1]s_3" + target_vault_name = "${aws_backup_vault.test.name}" + schedule = "cron(0 18 * * ? *)" } } -`, stringID) +`, rName) } -func testAccBackupPlanWithLifecycleDeleteAfterOnly(stringID string) string { +func testAccAwsBackupPlanConfig_lifecycleColdStorageAfterOnly(rName string) string { return fmt.Sprintf(` resource "aws_backup_vault" "test" { - name = "tf_acc_test_backup_vault_%[1]s" + name = %[1]q } resource "aws_backup_plan" "test" { - name = "tf_acc_test_backup_plan_%[1]s" + name = %[1]q rule { - rule_name = "tf_acc_test_backup_rule_%[1]s" + rule_name = %[1]q target_vault_name = "${aws_backup_vault.test.name}" schedule = "cron(0 12 * * ? *)" lifecycle { - delete_after = "7" + cold_storage_after = 7 } } } -`, stringID) +`, rName) } -func testAccBackupPlanWithLifecycleColdStorageAfterOnly(stringID string) string { +func testAccAwsBackupPlanConfig_lifecycleDeleteAfterOnly(rName string) string { return fmt.Sprintf(` resource "aws_backup_vault" "test" { - name = "tf_acc_test_backup_vault_%[1]s" + name = %[1]q } resource "aws_backup_plan" "test" { - name = "tf_acc_test_backup_plan_%[1]s" + name = %[1]q rule { - rule_name = "tf_acc_test_backup_rule_%[1]s" + rule_name = %[1]q target_vault_name = "${aws_backup_vault.test.name}" schedule = "cron(0 12 * * ? *)" lifecycle { - cold_storage_after = "7" + delete_after = 120 } } } -`, stringID) +`, rName) } -func testAccBackupPlanWithRules(randInt int) string { +func testAccAwsBackupPlanConfig_lifecycleColdStorageAfterAndDeleteAfter(rName string) string { return fmt.Sprintf(` resource "aws_backup_vault" "test" { - name = "tf_acc_test_backup_vault_%[1]d" + name = %[1]q } resource "aws_backup_plan" "test" { - name = "tf_acc_test_backup_plan_%[1]d" + name = %[1]q rule { - rule_name = "tf_acc_test_backup_rule_%[1]d" + rule_name = %[1]q target_vault_name = "${aws_backup_vault.test.name}" schedule = "cron(0 12 * * ? *)" - } - rule { - rule_name = "tf_acc_test_backup_rule_%[1]d_2" - target_vault_name = "${aws_backup_vault.test.name}" - schedule = "cron(0 6 * * ? 
*)" + lifecycle { + cold_storage_after = 30 + delete_after = 180 + } } } -`, randInt) +`, rName) } From 3f4948228bc3aef1e5923e0ddfdd59d6f6549ffc Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 25 Oct 2019 16:39:54 -0400 Subject: [PATCH 28/55] r/aws_backup_plan: Add 'recovery_point_tags' acceptance test. $ make testacc TEST=./aws TESTARGS='-run=TestAccAwsBackupPlan_' ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./aws -v -count 1 -parallel 20 -run=TestAccAwsBackupPlan_ -timeout 120m === RUN TestAccAwsBackupPlan_basic === PAUSE TestAccAwsBackupPlan_basic === RUN TestAccAwsBackupPlan_withTags === PAUSE TestAccAwsBackupPlan_withTags === RUN TestAccAwsBackupPlan_withRules === PAUSE TestAccAwsBackupPlan_withRules === RUN TestAccAwsBackupPlan_withLifecycle === PAUSE TestAccAwsBackupPlan_withLifecycle === RUN TestAccAwsBackupPlan_withRecoveryPointTags === PAUSE TestAccAwsBackupPlan_withRecoveryPointTags === RUN TestAccAwsBackupPlan_disappears === PAUSE TestAccAwsBackupPlan_disappears === CONT TestAccAwsBackupPlan_basic === CONT TestAccAwsBackupPlan_withRecoveryPointTags === CONT TestAccAwsBackupPlan_disappears === CONT TestAccAwsBackupPlan_withRules === CONT TestAccAwsBackupPlan_withLifecycle === CONT TestAccAwsBackupPlan_withTags --- PASS: TestAccAwsBackupPlan_disappears (19.33s) --- PASS: TestAccAwsBackupPlan_basic (20.82s) --- FAIL: TestAccAwsBackupPlan_withRecoveryPointTags (26.61s) testing.go:569: Step 1 error: errors during apply: Error: error updating Backup Plan (1104fa3b-5714-4c2e-9776-78660300ee6b): InvalidParameter: 2 validation error(s) found. - missing required field, UpdateBackupPlanInput.BackupPlan.Rules[0].RuleName. - missing required field, UpdateBackupPlanInput.BackupPlan.Rules[0].TargetBackupVaultName. on /tmp/tf-test417192270/main.tf line 6: (source code not available) --- PASS: TestAccAwsBackupPlan_withRules (47.10s) --- PASS: TestAccAwsBackupPlan_withTags (47.82s) --- PASS: TestAccAwsBackupPlan_withLifecycle (60.07s) FAIL FAIL github.com/terraform-providers/terraform-provider-aws/aws 60.128s FAIL GNUmakefile:24: recipe for target 'testacc' failed make: *** [testacc] Error 1 --- aws/resource_aws_backup_plan_test.go | 122 +++++++++++++++++++++++++++ 1 file changed, 122 insertions(+) diff --git a/aws/resource_aws_backup_plan_test.go b/aws/resource_aws_backup_plan_test.go index 2be7279e5d3..7ce2dab20c5 100644 --- a/aws/resource_aws_backup_plan_test.go +++ b/aws/resource_aws_backup_plan_test.go @@ -34,6 +34,7 @@ func TestAccAwsBackupPlan_basic(t *testing.T) { testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "target_vault_name", rName), testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "schedule", "cron(0 12 * * ? *)"), testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "lifecycle.#", "0"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "recovery_point_tags.%", "0"), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), resource.TestCheckResourceAttrSet(resourceName, "version"), ), @@ -114,10 +115,12 @@ func TestAccAwsBackupPlan_withRules(t *testing.T) { testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule1Name, "target_vault_name", rName), testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule1Name, "schedule", "cron(0 12 * * ? 
*)"), testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule1Name, "lifecycle.#", "0"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule1Name, "recovery_point_tags.%", "0"), testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule2Name, "rule_name", rule2Name), testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule2Name, "target_vault_name", rName), testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule2Name, "schedule", "cron(0 6 * * ? *)"), testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule2Name, "lifecycle.#", "0"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule2Name, "recovery_point_tags.%", "0"), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), }, @@ -131,14 +134,17 @@ func TestAccAwsBackupPlan_withRules(t *testing.T) { testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule1Name, "target_vault_name", rName), testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule1Name, "schedule", "cron(0 6 * * ? *)"), testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule1Name, "lifecycle.#", "0"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule1Name, "recovery_point_tags.%", "0"), testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule2Name, "rule_name", rule2Name), testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule2Name, "target_vault_name", rName), testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule2Name, "schedule", "cron(0 12 * * ? *)"), testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule2Name, "lifecycle.#", "0"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule2Name, "recovery_point_tags.%", "0"), testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule3Name, "rule_name", rule3Name), testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule3Name, "target_vault_name", rName), testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule3Name, "schedule", "cron(0 18 * * ? *)"), testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule3Name, "lifecycle.#", "0"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule3Name, "recovery_point_tags.%", "0"), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), }, @@ -152,6 +158,7 @@ func TestAccAwsBackupPlan_withRules(t *testing.T) { testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "target_vault_name", rName), testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "schedule", "cron(0 12 * * ? 
*)"), testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "lifecycle.#", "0"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "recovery_point_tags.%", "0"), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), }, @@ -180,6 +187,7 @@ func TestAccAwsBackupPlan_withLifecycle(t *testing.T) { testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "lifecycle.#", "1"), testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "lifecycle.0.cold_storage_after", "7"), testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "lifecycle.0.delete_after", "0"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "recovery_point_tags.%", "0"), ), }, { @@ -192,6 +200,7 @@ func TestAccAwsBackupPlan_withLifecycle(t *testing.T) { testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "lifecycle.#", "1"), testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "lifecycle.0.cold_storage_after", "0"), testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "lifecycle.0.delete_after", "120"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "recovery_point_tags.%", "0"), ), }, { @@ -204,6 +213,7 @@ func TestAccAwsBackupPlan_withLifecycle(t *testing.T) { testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "lifecycle.#", "1"), testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "lifecycle.0.cold_storage_after", "30"), testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "lifecycle.0.delete_after", "180"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "recovery_point_tags.%", "0"), ), }, { @@ -214,6 +224,70 @@ func TestAccAwsBackupPlan_withLifecycle(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "rule_name", rName), testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "lifecycle.#", "0"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "recovery_point_tags.%", "0"), + ), + }, + }, + }) +} + +func TestAccAwsBackupPlan_withRecoveryPointTags(t *testing.T) { + var plan backup.GetBackupPlanOutput + ruleNameMap := map[string]string{} + resourceName := "aws_backup_plan.test" + rName := fmt.Sprintf("tf-testacc-backup-%s", acctest.RandStringFromCharSet(14, acctest.CharSetAlphaNum)) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSBackup(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAwsBackupPlanDestroy, + Steps: []resource.TestStep{ + { + Config: testAccAwsBackupPlanConfig_recoveryPointTags(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsBackupPlanExists(resourceName, &plan, &ruleNameMap), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "rule_name", rName), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "target_vault_name", rName), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "schedule", "cron(0 12 * * ? 
*)"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "lifecycle.#", "0"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "recovery_point_tags.%", "3"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "recovery_point_tags.Name", rName), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "recovery_point_tags.Key1", "Value1"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "recovery_point_tags.Key2", "Value2a"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + ), + }, + { + Config: testAccAwsBackupPlanConfig_recoveryPointTagsUpdated(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsBackupPlanExists(resourceName, &plan, &ruleNameMap), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "rule_name", rName), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "target_vault_name", rName), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "schedule", "cron(0 12 * * ? *)"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "lifecycle.#", "0"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "recovery_point_tags.%", "3"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "recovery_point_tags.Name", rName), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "recovery_point_tags.Key2", "Value2b"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "recovery_point_tags.Key3", "Value3"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + ), + }, + { + Config: testAccAwsBackupPlanConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsBackupPlanExists(resourceName, &plan, &ruleNameMap), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "rule_name", rName), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "target_vault_name", rName), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "schedule", "cron(0 12 * * ? *)"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "lifecycle.#", "0"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "recovery_point_tags.%", "0"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), }, }, @@ -503,3 +577,51 @@ resource "aws_backup_plan" "test" { } `, rName) } + +func testAccAwsBackupPlanConfig_recoveryPointTags(rName string) string { + return fmt.Sprintf(` +resource "aws_backup_vault" "test" { + name = %[1]q +} + +resource "aws_backup_plan" "test" { + name = %[1]q + + rule { + rule_name = %[1]q + target_vault_name = "${aws_backup_vault.test.name}" + schedule = "cron(0 12 * * ? *)" + + recovery_point_tags = { + Name = %[1]q + Key1 = "Value1" + Key2 = "Value2a" + } + } +} +`, rName) +} + +func testAccAwsBackupPlanConfig_recoveryPointTagsUpdated(rName string) string { + return fmt.Sprintf(` +resource "aws_backup_vault" "test" { + name = %[1]q +} + +resource "aws_backup_plan" "test" { + name = %[1]q + + rule { + rule_name = %[1]q + target_vault_name = "${aws_backup_vault.test.name}" + schedule = "cron(0 12 * * ? 
*)" + + recovery_point_tags = { + Name = %[1]q + Key2 = "Value2b" + Key3 = "Value3" + } + } +} +`, rName) +} From 1e9ca65facd3e2d460e5cff6fa38528dad2671d4 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 25 Oct 2019 17:23:41 -0400 Subject: [PATCH 29/55] r/aws_backup_plan: Don't update rules that don't have a name. --- aws/resource_aws_backup_plan.go | 84 ++++++++++++++++----------------- 1 file changed, 41 insertions(+), 43 deletions(-) diff --git a/aws/resource_aws_backup_plan.go b/aws/resource_aws_backup_plan.go index 51e6a83d3ed..58b3e5a8a58 100644 --- a/aws/resource_aws_backup_plan.go +++ b/aws/resource_aws_backup_plan.go @@ -71,7 +71,7 @@ func resourceAwsBackupPlan() *schema.Resource { "recovery_point_tags": tagsSchema(), }, }, - Set: backupBackuPlanHash, + Set: backupBackupPlanHash, }, "arn": { Type: schema.TypeString, @@ -150,18 +150,20 @@ func resourceAwsBackupPlanRead(d *schema.ResourceData, meta interface{}) error { func resourceAwsBackupPlanUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).backupconn - input := &backup.UpdateBackupPlanInput{ - BackupPlanId: aws.String(d.Id()), - BackupPlan: &backup.PlanInput{ - BackupPlanName: aws.String(d.Get("name").(string)), - Rules: expandBackupPlanRules(d.Get("rule").(*schema.Set)), - }, - } + if d.HasChange("rule") { + input := &backup.UpdateBackupPlanInput{ + BackupPlanId: aws.String(d.Id()), + BackupPlan: &backup.PlanInput{ + BackupPlanName: aws.String(d.Get("name").(string)), + Rules: expandBackupPlanRules(d.Get("rule").(*schema.Set)), + }, + } - log.Printf("[DEBUG] Updating Backup Plan: %#v", input) - _, err := conn.UpdateBackupPlan(input) - if err != nil { - return fmt.Errorf("error updating Backup Plan (%s): %s", d.Id(), err) + log.Printf("[DEBUG] Updating Backup Plan: %#v", input) + _, err := conn.UpdateBackupPlan(input) + if err != nil { + return fmt.Errorf("error updating Backup Plan (%s): %s", d.Id(), err) + } } if d.HasChange("tags") { @@ -237,6 +239,8 @@ func expandBackupPlanRules(vRules *schema.Set) []*backup.RuleInput { if vRuleName, ok := mRule["rule_name"].(string); ok && vRuleName != "" { rule.RuleName = aws.String(vRuleName) + } else { + continue } if vTargetVaultName, ok := mRule["target_vault_name"].(string); ok && vTargetVaultName != "" { rule.TargetBackupVaultName = aws.String(vTargetVaultName) @@ -301,49 +305,43 @@ func flattenBackupPlanRules(rules []*backup.Rule) *schema.Set { vRules = append(vRules, mRule) } - return schema.NewSet(backupBackuPlanHash, vRules) + return schema.NewSet(backupBackupPlanHash, vRules) } -func backupBackuPlanHash(v interface{}) int { +func backupBackupPlanHash(vRule interface{}) int { var buf bytes.Buffer - m := v.(map[string]interface{}) - - if v.(map[string]interface{})["lifecycle"] != nil { - lcRaw := v.(map[string]interface{})["lifecycle"].([]interface{}) - if len(lcRaw) == 1 { - l := lcRaw[0].(map[string]interface{}) - if w, ok := l["delete_after"]; ok { - buf.WriteString(fmt.Sprintf("%v-", w)) - } - if w, ok := l["cold_storage_after"]; ok { - buf.WriteString(fmt.Sprintf("%v-", w)) - } - } - } + mRule := vRule.(map[string]interface{}) - if v, ok := m["completion_window"]; ok { - buf.WriteString(fmt.Sprintf("%d-", v.(interface{}))) + if v, ok := mRule["rule_name"].(string); ok { + buf.WriteString(fmt.Sprintf("%s-", v)) } - - if v, ok := m["recovery_point_tags"]; ok { - buf.WriteString(fmt.Sprintf("%v-", v)) + if v, ok := mRule["target_vault_name"].(string); ok { + buf.WriteString(fmt.Sprintf("%s-", v)) } - - if v, ok := m["rule_name"]; ok { - 
buf.WriteString(fmt.Sprintf("%s-", v.(string))) + if v, ok := mRule["schedule"].(string); ok { + buf.WriteString(fmt.Sprintf("%s-", v)) } - - if v, ok := m["schedule"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) + if v, ok := mRule["start_window"].(int); ok { + buf.WriteString(fmt.Sprintf("%d-", v)) + } + if v, ok := mRule["completion_window"].(int); ok { + buf.WriteString(fmt.Sprintf("%d-", v)) } - if v, ok := m["start_window"]; ok { - buf.WriteString(fmt.Sprintf("%d-", v.(interface{}))) + if vRecoveryPointTags, ok := mRule["recovery_point_tags"].(map[string]interface{}); ok && len(vRecoveryPointTags) > 0 { + buf.WriteString(fmt.Sprintf("%d-", tagsMapToHash(vRecoveryPointTags))) } - if v, ok := m["target_vault_name"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) + if vLifecycle, ok := mRule["lifecycle"].([]interface{}); ok && len(vLifecycle) > 0 && vLifecycle[0] != nil { + mLifecycle := vLifecycle[0].(map[string]interface{}) + + if v, ok := mLifecycle["delete_after"].(int); ok { + buf.WriteString(fmt.Sprintf("%d-", v)) + } + if v, ok := mLifecycle["cold_storage_after"].(int); ok { + buf.WriteString(fmt.Sprintf("%d-", v)) + } } return hashcode.String(buf.String()) From ac55cc1b8a3b1c9a532214acc171d27b7a141f51 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 25 Oct 2019 18:17:01 -0400 Subject: [PATCH 30/55] r/aws_backup_plan: Use 'keyvaluetags'. --- aws/resource_aws_backup_plan.go | 63 +++++++-------------------------- 1 file changed, 12 insertions(+), 51 deletions(-) diff --git a/aws/resource_aws_backup_plan.go b/aws/resource_aws_backup_plan.go index 58b3e5a8a58..1a895826e15 100644 --- a/aws/resource_aws_backup_plan.go +++ b/aws/resource_aws_backup_plan.go @@ -9,6 +9,7 @@ import ( "github.com/aws/aws-sdk-go/service/backup" "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) func resourceAwsBackupPlan() *schema.Resource { @@ -94,9 +95,7 @@ func resourceAwsBackupPlanCreate(d *schema.ResourceData, meta interface{}) error BackupPlanName: aws.String(d.Get("name").(string)), Rules: expandBackupPlanRules(d.Get("rule").(*schema.Set)), }, - } - if v, ok := d.GetOk("tags"); ok { - input.BackupPlanTags = tagsFromMapGeneric(v.(map[string]interface{})) + BackupPlanTags: keyvaluetags.New(d.Get("tags").(map[string]interface{})).IgnoreAws().BackupTags(), } log.Printf("[DEBUG] Creating Backup Plan: %#v", input) @@ -125,25 +124,22 @@ func resourceAwsBackupPlanRead(d *schema.ResourceData, meta interface{}) error { return fmt.Errorf("error reading Backup Plan (%s): %s", d.Id(), err) } + d.Set("arn", resp.BackupPlanArn) d.Set("name", resp.BackupPlan.BackupPlanName) + d.Set("version", resp.VersionId) + if err := d.Set("rule", flattenBackupPlanRules(resp.BackupPlan.Rules)); err != nil { return fmt.Errorf("error setting rule: %s", err) } - tagsOutput, err := conn.ListTags(&backup.ListTagsInput{ - ResourceArn: resp.BackupPlanArn, - }) + tags, err := keyvaluetags.BackupListTags(conn, d.Get("arn").(string)) if err != nil { - return fmt.Errorf("error listing tags AWS Backup plan %s: %s", d.Id(), err) + return fmt.Errorf("error listing tags for Backup Plan (%s): %s", d.Id(), err) } - - if err := d.Set("tags", tagsToMapGeneric(tagsOutput.Tags)); err != nil { - return fmt.Errorf("error setting tags on AWS Backup plan %s: %s", d.Id(), err) + if err := d.Set("tags", tags.IgnoreAws().Map()); err != nil { + return fmt.Errorf("error 
setting tags: %s", err) } - d.Set("arn", resp.BackupPlanArn) - d.Set("version", resp.VersionId) - return nil } @@ -167,44 +163,9 @@ func resourceAwsBackupPlanUpdate(d *schema.ResourceData, meta interface{}) error } if d.HasChange("tags") { - resourceArn := d.Get("arn").(string) - oraw, nraw := d.GetChange("tags") - create, remove := diffTagsGeneric(oraw.(map[string]interface{}), nraw.(map[string]interface{})) - - if len(remove) > 0 { - log.Printf("[DEBUG] Removing tags: %#v", remove) - keys := make([]*string, 0, len(remove)) - for k := range remove { - keys = append(keys, aws.String(k)) - } - - _, err := conn.UntagResource(&backup.UntagResourceInput{ - ResourceArn: aws.String(resourceArn), - TagKeyList: keys, - }) - if isAWSErr(err, backup.ErrCodeResourceNotFoundException, "") { - log.Printf("[WARN] Backup Plan %s not found, removing from state", d.Id()) - d.SetId("") - return nil - } - if err != nil { - return fmt.Errorf("Error removing tags for (%s): %s", d.Id(), err) - } - } - if len(create) > 0 { - log.Printf("[DEBUG] Creating tags: %#v", create) - _, err := conn.TagResource(&backup.TagResourceInput{ - ResourceArn: aws.String(resourceArn), - Tags: create, - }) - if isAWSErr(err, backup.ErrCodeResourceNotFoundException, "") { - log.Printf("[WARN] Backup Plan %s not found, removing from state", d.Id()) - d.SetId("") - return nil - } - if err != nil { - return fmt.Errorf("Error setting tags for (%s): %s", d.Id(), err) - } + o, n := d.GetChange("tags") + if err := keyvaluetags.BackupUpdateTags(conn, d.Get("arn").(string), o, n); err != nil { + return fmt.Errorf("error updating tags for Backup Plan (%s): %s", d.Id(), err) } } From e74a8fdc93b3022a87225fa0e174e34f09de2c30 Mon Sep 17 00:00:00 2001 From: Ryn Daniels Date: Mon, 28 Oct 2019 11:17:23 +0100 Subject: [PATCH 31/55] Import test refactor for launch resources --- aws/resource_aws_launch_configuration_test.go | 267 ++++++++------ aws/resource_aws_launch_template_test.go | 336 ++++++++++-------- 2 files changed, 344 insertions(+), 259 deletions(-) diff --git a/aws/resource_aws_launch_configuration_test.go b/aws/resource_aws_launch_configuration_test.go index 7ce44b637df..a47dffd475e 100644 --- a/aws/resource_aws_launch_configuration_test.go +++ b/aws/resource_aws_launch_configuration_test.go @@ -63,8 +63,9 @@ func testSweepLaunchConfigurations(region string) error { return nil } -func TestAccAWSLaunchConfiguration_importBasic(t *testing.T) { - resourceName := "aws_launch_configuration.bar" +func TestAccAWSLaunchConfiguration_basic(t *testing.T) { + var conf autoscaling.LaunchConfiguration + resourceName := "aws_launch_configuration.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -73,38 +74,22 @@ func TestAccAWSLaunchConfiguration_importBasic(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccAWSLaunchConfigurationNoNameConfig(), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSLaunchConfigurationExists(resourceName, &conf), + testAccCheckAWSLaunchConfigurationGeneratedNamePrefix(resourceName, "terraform-"), + ), }, - { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{"associate_public_ip_address"}, }, - }, - }) -} - -func TestAccAWSLaunchConfiguration_basic(t *testing.T) { - var conf autoscaling.LaunchConfiguration - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSLaunchConfigurationDestroy, - Steps: 
[]resource.TestStep{ - { - Config: testAccAWSLaunchConfigurationNoNameConfig(), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLaunchConfigurationExists("aws_launch_configuration.bar", &conf), - testAccCheckAWSLaunchConfigurationGeneratedNamePrefix("aws_launch_configuration.bar", "terraform-"), - ), - }, { Config: testAccAWSLaunchConfigurationPrefixNameConfig(), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLaunchConfigurationExists("aws_launch_configuration.baz", &conf), - testAccCheckAWSLaunchConfigurationGeneratedNamePrefix("aws_launch_configuration.baz", "tf-acc-test-"), + testAccCheckAWSLaunchConfigurationExists(resourceName, &conf), + testAccCheckAWSLaunchConfigurationGeneratedNamePrefix(resourceName, "tf-acc-test-"), ), }, }, @@ -113,6 +98,7 @@ func TestAccAWSLaunchConfiguration_basic(t *testing.T) { func TestAccAWSLaunchConfiguration_withBlockDevices(t *testing.T) { var conf autoscaling.LaunchConfiguration + resourceName := "aws_launch_configuration.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -122,20 +108,27 @@ func TestAccAWSLaunchConfiguration_withBlockDevices(t *testing.T) { { Config: testAccAWSLaunchConfigurationConfig(), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLaunchConfigurationExists("aws_launch_configuration.bar", &conf), + testAccCheckAWSLaunchConfigurationExists(resourceName, &conf), testAccCheckAWSLaunchConfigurationAttributes(&conf), - resource.TestMatchResourceAttr("aws_launch_configuration.bar", "image_id", regexp.MustCompile("^ami-[0-9a-z]+")), - resource.TestCheckResourceAttr("aws_launch_configuration.bar", "instance_type", "m1.small"), - resource.TestCheckResourceAttr("aws_launch_configuration.bar", "associate_public_ip_address", "true"), - resource.TestCheckResourceAttr("aws_launch_configuration.bar", "spot_price", ""), + resource.TestMatchResourceAttr(resourceName, "image_id", regexp.MustCompile("^ami-[0-9a-z]+")), + resource.TestCheckResourceAttr(resourceName, "instance_type", "m1.small"), + resource.TestCheckResourceAttr(resourceName, "associate_public_ip_address", "true"), + resource.TestCheckResourceAttr(resourceName, "spot_price", ""), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"associate_public_ip_address"}, + }, }, }) } func TestAccAWSLaunchConfiguration_updateRootBlockDevice(t *testing.T) { var conf autoscaling.LaunchConfiguration + resourceName := "aws_launch_configuration.test" rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ @@ -146,15 +139,21 @@ func TestAccAWSLaunchConfiguration_updateRootBlockDevice(t *testing.T) { { Config: testAccAWSLaunchConfigurationConfigWithRootBlockDevice(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLaunchConfigurationExists("aws_launch_configuration.bar", &conf), - resource.TestCheckResourceAttr("aws_launch_configuration.bar", "root_block_device.0.volume_size", "11"), + testAccCheckAWSLaunchConfigurationExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "root_block_device.0.volume_size", "11"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"associate_public_ip_address", "name_prefix"}, + }, { Config: testAccAWSLaunchConfigurationConfigWithRootBlockDeviceUpdated(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLaunchConfigurationExists("aws_launch_configuration.bar", &conf), - 
resource.TestCheckResourceAttr("aws_launch_configuration.bar", "root_block_device.0.volume_size", "20"), + testAccCheckAWSLaunchConfigurationExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "root_block_device.0.volume_size", "20"), ), }, }, @@ -164,6 +163,7 @@ func TestAccAWSLaunchConfiguration_updateRootBlockDevice(t *testing.T) { func TestAccAWSLaunchConfiguration_encryptedRootBlockDevice(t *testing.T) { var conf autoscaling.LaunchConfiguration rInt := acctest.RandInt() + resourceName := "aws_launch_configuration.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -173,16 +173,23 @@ func TestAccAWSLaunchConfiguration_encryptedRootBlockDevice(t *testing.T) { { Config: testAccAWSLaunchConfigurationConfigWithEncryptedRootBlockDevice(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLaunchConfigurationExists("aws_launch_configuration.bar", &conf), - resource.TestCheckResourceAttr("aws_launch_configuration.bar", "root_block_device.0.encrypted", "true"), + testAccCheckAWSLaunchConfigurationExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "root_block_device.0.encrypted", "true"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"associate_public_ip_address", "name_prefix"}, + }, }, }) } func TestAccAWSLaunchConfiguration_withSpotPrice(t *testing.T) { var conf autoscaling.LaunchConfiguration + resourceName := "aws_launch_configuration.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -192,10 +199,16 @@ func TestAccAWSLaunchConfiguration_withSpotPrice(t *testing.T) { { Config: testAccAWSLaunchConfigurationWithSpotPriceConfig(), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLaunchConfigurationExists("aws_launch_configuration.bar", &conf), - resource.TestCheckResourceAttr("aws_launch_configuration.bar", "spot_price", "0.01"), + testAccCheckAWSLaunchConfigurationExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "spot_price", "0.01"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"associate_public_ip_address"}, + }, }, }) } @@ -205,6 +218,7 @@ func TestAccAWSLaunchConfiguration_withVpcClassicLink(t *testing.T) { var group ec2.SecurityGroup var conf autoscaling.LaunchConfiguration rInt := acctest.RandInt() + resourceName := "aws_launch_configuration.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -214,13 +228,13 @@ func TestAccAWSLaunchConfiguration_withVpcClassicLink(t *testing.T) { { Config: testAccAWSLaunchConfigurationConfig_withVpcClassicLink(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLaunchConfigurationExists("aws_launch_configuration.foo", &conf), - testAccCheckVpcExists("aws_vpc.foo", &vpc), - testAccCheckAWSSecurityGroupExists("aws_security_group.foo", &group), + testAccCheckAWSLaunchConfigurationExists(resourceName, &conf), + testAccCheckVpcExists("aws_vpc.test", &vpc), + testAccCheckAWSSecurityGroupExists("aws_security_group.test", &group), ), }, { - ResourceName: "aws_launch_configuration.foo", + ResourceName: resourceName, ImportState: true, ImportStateVerify: true, }, @@ -231,6 +245,7 @@ func TestAccAWSLaunchConfiguration_withVpcClassicLink(t *testing.T) { func TestAccAWSLaunchConfiguration_withIAMProfile(t *testing.T) { var conf autoscaling.LaunchConfiguration rInt := acctest.RandInt() + 
resourceName := "aws_launch_configuration.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -240,41 +255,22 @@ func TestAccAWSLaunchConfiguration_withIAMProfile(t *testing.T) { { Config: testAccAWSLaunchConfigurationConfig_withIAMProfile(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLaunchConfigurationExists("aws_launch_configuration.bar", &conf), + testAccCheckAWSLaunchConfigurationExists(resourceName, &conf), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"associate_public_ip_address"}, + }, }, }) } -func testAccCheckAWSLaunchConfigurationWithEncryption(conf *autoscaling.LaunchConfiguration) resource.TestCheckFunc { - return func(s *terraform.State) error { - // Map out the block devices by name, which should be unique. - blockDevices := make(map[string]*autoscaling.BlockDeviceMapping) - for _, blockDevice := range conf.BlockDeviceMappings { - blockDevices[*blockDevice.DeviceName] = blockDevice - } - - // Check if the root block device exists. - if _, ok := blockDevices["/dev/sda1"]; !ok { - return fmt.Errorf("block device doesn't exist: /dev/sda1") - } else if blockDevices["/dev/sda1"].Ebs.Encrypted != nil { - return fmt.Errorf("root device should not include value for Encrypted") - } - - // Check if the secondary block device exists. - if _, ok := blockDevices["/dev/sdb"]; !ok { - return fmt.Errorf("block device doesn't exist: /dev/sdb") - } else if !*blockDevices["/dev/sdb"].Ebs.Encrypted { - return fmt.Errorf("block device isn't encrypted as expected: /dev/sdb") - } - - return nil - } -} - func TestAccAWSLaunchConfiguration_withEncryption(t *testing.T) { var conf autoscaling.LaunchConfiguration + resourceName := "aws_launch_configuration.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -284,16 +280,23 @@ func TestAccAWSLaunchConfiguration_withEncryption(t *testing.T) { { Config: testAccAWSLaunchConfigurationWithEncryption(), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLaunchConfigurationExists("aws_launch_configuration.baz", &conf), + testAccCheckAWSLaunchConfigurationExists("aws_launch_configuration.test", &conf), testAccCheckAWSLaunchConfigurationWithEncryption(&conf), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"associate_public_ip_address"}, + }, }, }) } func TestAccAWSLaunchConfiguration_updateEbsBlockDevices(t *testing.T) { var conf autoscaling.LaunchConfiguration + resourceName := "aws_launch_configuration.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -303,15 +306,21 @@ func TestAccAWSLaunchConfiguration_updateEbsBlockDevices(t *testing.T) { { Config: testAccAWSLaunchConfigurationWithEncryption(), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLaunchConfigurationExists("aws_launch_configuration.baz", &conf), - resource.TestCheckResourceAttr("aws_launch_configuration.baz", "ebs_block_device.1393547169.volume_size", "9"), + testAccCheckAWSLaunchConfigurationExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "ebs_block_device.1393547169.volume_size", "9"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"associate_public_ip_address"}, + }, { Config: testAccAWSLaunchConfigurationWithEncryptionUpdated(), Check: resource.ComposeTestCheckFunc( - 
testAccCheckAWSLaunchConfigurationExists("aws_launch_configuration.baz", &conf), - resource.TestCheckResourceAttr("aws_launch_configuration.baz", "ebs_block_device.4131155854.volume_size", "10"), + testAccCheckAWSLaunchConfigurationExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "ebs_block_device.4131155854.volume_size", "10"), ), }, }, @@ -321,6 +330,7 @@ func TestAccAWSLaunchConfiguration_updateEbsBlockDevices(t *testing.T) { func TestAccAWSLaunchConfiguration_ebs_noDevice(t *testing.T) { var conf autoscaling.LaunchConfiguration rInt := acctest.RandInt() + resourceName := "aws_launch_configuration.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -330,17 +340,24 @@ func TestAccAWSLaunchConfiguration_ebs_noDevice(t *testing.T) { { Config: testAccAWSLaunchConfigurationConfigEbsNoDevice(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLaunchConfigurationExists("aws_launch_configuration.bar", &conf), - resource.TestCheckResourceAttr("aws_launch_configuration.bar", "ebs_block_device.#", "1"), - resource.TestCheckResourceAttr("aws_launch_configuration.bar", "ebs_block_device.3099842682.device_name", "/dev/sda2"), - resource.TestCheckResourceAttr("aws_launch_configuration.bar", "ebs_block_device.3099842682.no_device", "true"), + testAccCheckAWSLaunchConfigurationExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "ebs_block_device.#", "1"), + resource.TestCheckResourceAttr(resourceName, "ebs_block_device.3099842682.device_name", "/dev/sda2"), + resource.TestCheckResourceAttr(resourceName, "ebs_block_device.3099842682.no_device", "true"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"associate_public_ip_address", "name_prefix"}, + }, }, }) } func TestAccAWSLaunchConfiguration_userData(t *testing.T) { var conf autoscaling.LaunchConfiguration + resourceName := "aws_launch_configuration.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -350,21 +367,53 @@ func TestAccAWSLaunchConfiguration_userData(t *testing.T) { { Config: testAccAWSLaunchConfigurationConfig_userData(), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLaunchConfigurationExists("aws_launch_configuration.bar", &conf), - resource.TestCheckResourceAttr("aws_launch_configuration.bar", "user_data", "3dc39dda39be1205215e776bad998da361a5955d"), + testAccCheckAWSLaunchConfigurationExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "user_data", "3dc39dda39be1205215e776bad998da361a5955d"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"associate_public_ip_address"}, + }, { Config: testAccAWSLaunchConfigurationConfig_userDataBase64(), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLaunchConfigurationExists("aws_launch_configuration.bar", &conf), - resource.TestCheckResourceAttr("aws_launch_configuration.bar", "user_data_base64", "aGVsbG8gd29ybGQ="), + testAccCheckAWSLaunchConfigurationExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "user_data_base64", "aGVsbG8gd29ybGQ="), ), }, }, }) } +func testAccCheckAWSLaunchConfigurationWithEncryption(conf *autoscaling.LaunchConfiguration) resource.TestCheckFunc { + return func(s *terraform.State) error { + // Map out the block devices by name, which should be unique. 
+ blockDevices := make(map[string]*autoscaling.BlockDeviceMapping) + for _, blockDevice := range conf.BlockDeviceMappings { + blockDevices[*blockDevice.DeviceName] = blockDevice + } + + // Check if the root block device exists. + if _, ok := blockDevices["/dev/sda1"]; !ok { + return fmt.Errorf("block device doesn't exist: /dev/sda1") + } else if blockDevices["/dev/sda1"].Ebs.Encrypted != nil { + return fmt.Errorf("root device should not include value for Encrypted") + } + + // Check if the secondary block device exists. + if _, ok := blockDevices["/dev/sdb"]; !ok { + return fmt.Errorf("block device doesn't exist: /dev/sdb") + } else if !*blockDevices["/dev/sdb"].Ebs.Encrypted { + return fmt.Errorf("block device isn't encrypted as expected: /dev/sdb") + } + + return nil + } +} + func testAccCheckAWSLaunchConfigurationGeneratedNamePrefix( resource, prefix string) resource.TestCheckFunc { return func(s *terraform.State) error { @@ -501,11 +550,11 @@ data "aws_ami" "ubuntu" { func testAccAWSLaunchConfigurationConfigWithRootBlockDevice(rInt int) string { return testAccAWSLaunchConfigurationConfig_ami() + fmt.Sprintf(` -resource "aws_launch_configuration" "bar" { +resource "aws_launch_configuration" "test" { name_prefix = "tf-acc-test-%d" image_id = "${data.aws_ami.ubuntu.id}" instance_type = "m1.small" - user_data = "foobar-user-data" + user_data = "testtest-user-data" associate_public_ip_address = true root_block_device { @@ -518,7 +567,7 @@ resource "aws_launch_configuration" "bar" { func testAccAWSLaunchConfigurationConfigWithEncryptedRootBlockDevice(rInt int) string { return testAccAWSLaunchConfigurationConfig_ami() + fmt.Sprintf(` -resource "aws_vpc" "foo" { +resource "aws_vpc" "test" { cidr_block = "10.1.0.0/16" tags = { @@ -526,9 +575,9 @@ resource "aws_vpc" "foo" { } } -resource "aws_subnet" "foo" { +resource "aws_subnet" "test" { cidr_block = "10.1.1.0/24" - vpc_id = "${aws_vpc.foo.id}" + vpc_id = "${aws_vpc.test.id}" availability_zone = "us-west-2a" tags = { @@ -536,11 +585,11 @@ resource "aws_subnet" "foo" { } } -resource "aws_launch_configuration" "bar" { +resource "aws_launch_configuration" "test" { name_prefix = "tf-acc-test-%d" image_id = "${data.aws_ami.ubuntu.id}" instance_type = "t3.nano" - user_data = "foobar-user-data" + user_data = "testtest-user-data" associate_public_ip_address = true root_block_device { @@ -554,11 +603,11 @@ resource "aws_launch_configuration" "bar" { func testAccAWSLaunchConfigurationConfigWithRootBlockDeviceUpdated(rInt int) string { return testAccAWSLaunchConfigurationConfig_ami() + fmt.Sprintf(` -resource "aws_launch_configuration" "bar" { +resource "aws_launch_configuration" "test" { name_prefix = "tf-acc-test-%d" image_id = "${data.aws_ami.ubuntu.id}" instance_type = "m1.small" - user_data = "foobar-user-data" + user_data = "testtest-user-data" associate_public_ip_address = true root_block_device { @@ -571,11 +620,11 @@ resource "aws_launch_configuration" "bar" { func testAccAWSLaunchConfigurationConfig() string { return testAccAWSLaunchConfigurationConfig_ami() + fmt.Sprintf(` -resource "aws_launch_configuration" "bar" { +resource "aws_launch_configuration" "test" { name = "tf-acc-test-%d" image_id = "${data.aws_ami.ubuntu.id}" instance_type = "m1.small" - user_data = "foobar-user-data" + user_data = "testtest-user-data" associate_public_ip_address = true root_block_device { @@ -602,7 +651,7 @@ resource "aws_launch_configuration" "bar" { func testAccAWSLaunchConfigurationWithSpotPriceConfig() string { return 
testAccAWSLaunchConfigurationConfig_ami() + fmt.Sprintf(` -resource "aws_launch_configuration" "bar" { +resource "aws_launch_configuration" "test" { name = "tf-acc-test-%d" image_id = "${data.aws_ami.ubuntu.id}" instance_type = "t2.micro" @@ -613,10 +662,10 @@ resource "aws_launch_configuration" "bar" { func testAccAWSLaunchConfigurationNoNameConfig() string { return testAccAWSLaunchConfigurationConfig_ami() + fmt.Sprintf(` -resource "aws_launch_configuration" "bar" { +resource "aws_launch_configuration" "test" { image_id = "${data.aws_ami.ubuntu.id}" instance_type = "t2.micro" - user_data = "foobar-user-data-change" + user_data = "testtest-user-data-change" associate_public_ip_address = false } `) @@ -624,11 +673,11 @@ resource "aws_launch_configuration" "bar" { func testAccAWSLaunchConfigurationPrefixNameConfig() string { return testAccAWSLaunchConfigurationConfig_ami() + fmt.Sprintf(` -resource "aws_launch_configuration" "baz" { +resource "aws_launch_configuration" "test" { name_prefix = "tf-acc-test-" image_id = "${data.aws_ami.ubuntu.id}" instance_type = "t2.micro" - user_data = "foobar-user-data-change" + user_data = "testtest-user-data-change" associate_public_ip_address = false } `) @@ -636,7 +685,7 @@ resource "aws_launch_configuration" "baz" { func testAccAWSLaunchConfigurationWithEncryption() string { return testAccAWSLaunchConfigurationConfig_ami() + fmt.Sprintf(` -resource "aws_launch_configuration" "baz" { +resource "aws_launch_configuration" "test" { image_id = "${data.aws_ami.ubuntu.id}" instance_type = "t2.micro" associate_public_ip_address = false @@ -656,7 +705,7 @@ resource "aws_launch_configuration" "baz" { func testAccAWSLaunchConfigurationWithEncryptionUpdated() string { return testAccAWSLaunchConfigurationConfig_ami() + fmt.Sprintf(` -resource "aws_launch_configuration" "baz" { +resource "aws_launch_configuration" "test" { image_id = "${data.aws_ami.ubuntu.id}" instance_type = "t2.micro" associate_public_ip_address = false @@ -676,7 +725,7 @@ resource "aws_launch_configuration" "baz" { func testAccAWSLaunchConfigurationConfig_withVpcClassicLink(rInt int) string { return testAccAWSLaunchConfigurationConfig_ami() + fmt.Sprintf(` -resource "aws_vpc" "foo" { +resource "aws_vpc" "test" { cidr_block = "10.0.0.0/16" enable_classiclink = true tags = { @@ -684,18 +733,18 @@ resource "aws_vpc" "foo" { } } -resource "aws_security_group" "foo" { +resource "aws_security_group" "test" { name = "tf-acc-test-%[1]d" - vpc_id = "${aws_vpc.foo.id}" + vpc_id = "${aws_vpc.test.id}" } -resource "aws_launch_configuration" "foo" { +resource "aws_launch_configuration" "test" { name = "tf-acc-test-%[1]d" image_id = "${data.aws_ami.ubuntu.id}" instance_type = "t2.micro" - vpc_classic_link_id = "${aws_vpc.foo.id}" - vpc_classic_link_security_groups = ["${aws_security_group.foo.id}"] + vpc_classic_link_id = "${aws_vpc.test.id}" + vpc_classic_link_security_groups = ["${aws_security_group.test.id}"] } `, rInt) } @@ -726,7 +775,7 @@ resource "aws_iam_instance_profile" "profile" { roles = ["${aws_iam_role.role.name}"] } -resource "aws_launch_configuration" "bar" { +resource "aws_launch_configuration" "test" { image_id = "${data.aws_ami.ubuntu.id}" instance_type = "t2.nano" iam_instance_profile = "${aws_iam_instance_profile.profile.name}" @@ -736,7 +785,7 @@ resource "aws_launch_configuration" "bar" { func testAccAWSLaunchConfigurationConfigEbsNoDevice(rInt int) string { return testAccAWSLaunchConfigurationConfig_ami() + fmt.Sprintf(` -resource "aws_launch_configuration" "bar" { +resource 
"aws_launch_configuration" "test" { name_prefix = "tf-acc-test-%d" image_id = "${data.aws_ami.ubuntu.id}" instance_type = "m1.small" @@ -750,7 +799,7 @@ resource "aws_launch_configuration" "bar" { func testAccAWSLaunchConfigurationConfig_userData() string { return testAccAWSLaunchConfigurationConfig_ami() + fmt.Sprintf(` -resource "aws_launch_configuration" "bar" { +resource "aws_launch_configuration" "test" { image_id = "${data.aws_ami.ubuntu.id}" instance_type = "t2.micro" user_data = "foo:-with-character's" @@ -761,7 +810,7 @@ resource "aws_launch_configuration" "bar" { func testAccAWSLaunchConfigurationConfig_userDataBase64() string { return testAccAWSLaunchConfigurationConfig_ami() + fmt.Sprintf(` -resource "aws_launch_configuration" "bar" { +resource "aws_launch_configuration" "test" { image_id = "${data.aws_ami.ubuntu.id}" instance_type = "t2.micro" user_data_base64 = "${base64encode("hello world")}" diff --git a/aws/resource_aws_launch_template_test.go b/aws/resource_aws_launch_template_test.go index 3a173959479..a73a3d9fa12 100644 --- a/aws/resource_aws_launch_template_test.go +++ b/aws/resource_aws_launch_template_test.go @@ -13,51 +13,9 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/terraform" ) -func TestAccAWSLaunchTemplate_importBasic(t *testing.T) { - resName := "aws_launch_template.foo" - rInt := acctest.RandInt() - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSLaunchTemplateDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSLaunchTemplateConfig_basic(rInt), - }, - { - ResourceName: resName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccAWSLaunchTemplate_importData(t *testing.T) { - resName := "aws_launch_template.foo" - rInt := acctest.RandInt() - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSLaunchTemplateDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSLaunchTemplateConfig_data(rInt), - }, - { - ResourceName: resName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - func TestAccAWSLaunchTemplate_basic(t *testing.T) { var template ec2.LaunchTemplate - resName := "aws_launch_template.foo" + resourceName := "aws_launch_template.test" rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ @@ -68,21 +26,26 @@ func TestAccAWSLaunchTemplate_basic(t *testing.T) { { Config: testAccAWSLaunchTemplateConfig_basic(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLaunchTemplateExists(resName, &template), - resource.TestCheckResourceAttr(resName, "default_version", "1"), - resource.TestCheckResourceAttr(resName, "latest_version", "1"), - resource.TestCheckResourceAttrSet(resName, "arn"), - resource.TestCheckResourceAttr(resName, "ebs_optimized", ""), - resource.TestCheckResourceAttr(resName, "elastic_inference_accelerator.#", "0"), + testAccCheckAWSLaunchTemplateExists(resourceName, &template), + resource.TestCheckResourceAttr(resourceName, "default_version", "1"), + resource.TestCheckResourceAttr(resourceName, "latest_version", "1"), + resource.TestCheckResourceAttrSet(resourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "ebs_optimized", ""), + resource.TestCheckResourceAttr(resourceName, "elastic_inference_accelerator.#", "0"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } func 
TestAccAWSLaunchTemplate_disappears(t *testing.T) { var launchTemplate ec2.LaunchTemplate - resourceName := "aws_launch_template.foo" + resourceName := "aws_launch_template.test" rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ @@ -263,7 +226,7 @@ func TestAccAWSLaunchTemplate_ElasticInferenceAccelerator(t *testing.T) { func TestAccAWSLaunchTemplate_data(t *testing.T) { var template ec2.LaunchTemplate - resName := "aws_launch_template.foo" + resourceName := "aws_launch_template.test" rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ @@ -274,34 +237,39 @@ func TestAccAWSLaunchTemplate_data(t *testing.T) { { Config: testAccAWSLaunchTemplateConfig_data(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLaunchTemplateExists(resName, &template), - resource.TestCheckResourceAttr(resName, "block_device_mappings.#", "1"), - resource.TestCheckResourceAttrSet(resName, "disable_api_termination"), - resource.TestCheckResourceAttr(resName, "ebs_optimized", "false"), - resource.TestCheckResourceAttr(resName, "elastic_gpu_specifications.#", "1"), - resource.TestCheckResourceAttr(resName, "iam_instance_profile.#", "1"), - resource.TestCheckResourceAttrSet(resName, "image_id"), - resource.TestCheckResourceAttrSet(resName, "instance_initiated_shutdown_behavior"), - resource.TestCheckResourceAttr(resName, "instance_market_options.#", "1"), - resource.TestCheckResourceAttrSet(resName, "instance_type"), - resource.TestCheckResourceAttrSet(resName, "kernel_id"), - resource.TestCheckResourceAttrSet(resName, "key_name"), - resource.TestCheckResourceAttr(resName, "monitoring.#", "1"), - resource.TestCheckResourceAttr(resName, "network_interfaces.#", "1"), - resource.TestCheckResourceAttr(resName, "network_interfaces.0.security_groups.#", "1"), - resource.TestCheckResourceAttr(resName, "placement.#", "1"), - resource.TestCheckResourceAttrSet(resName, "ram_disk_id"), - resource.TestCheckResourceAttr(resName, "vpc_security_group_ids.#", "1"), - resource.TestCheckResourceAttr(resName, "tag_specifications.#", "1"), + testAccCheckAWSLaunchTemplateExists(resourceName, &template), + resource.TestCheckResourceAttr(resourceName, "block_device_mappings.#", "1"), + resource.TestCheckResourceAttrSet(resourceName, "disable_api_termination"), + resource.TestCheckResourceAttr(resourceName, "ebs_optimized", "false"), + resource.TestCheckResourceAttr(resourceName, "elastic_gpu_specifications.#", "1"), + resource.TestCheckResourceAttr(resourceName, "iam_instance_profile.#", "1"), + resource.TestCheckResourceAttrSet(resourceName, "image_id"), + resource.TestCheckResourceAttrSet(resourceName, "instance_initiated_shutdown_behavior"), + resource.TestCheckResourceAttr(resourceName, "instance_market_options.#", "1"), + resource.TestCheckResourceAttrSet(resourceName, "instance_type"), + resource.TestCheckResourceAttrSet(resourceName, "kernel_id"), + resource.TestCheckResourceAttrSet(resourceName, "key_name"), + resource.TestCheckResourceAttr(resourceName, "monitoring.#", "1"), + resource.TestCheckResourceAttr(resourceName, "network_interfaces.#", "1"), + resource.TestCheckResourceAttr(resourceName, "network_interfaces.0.security_groups.#", "1"), + resource.TestCheckResourceAttr(resourceName, "placement.#", "1"), + resource.TestCheckResourceAttrSet(resourceName, "ram_disk_id"), + resource.TestCheckResourceAttr(resourceName, "vpc_security_group_ids.#", "1"), + resource.TestCheckResourceAttr(resourceName, "tag_specifications.#", "1"), ), }, + { + ResourceName: resourceName, + ImportState: 
true, + ImportStateVerify: true, + }, }, }) } func TestAccAWSLaunchTemplate_description(t *testing.T) { var template ec2.LaunchTemplate - resName := "aws_launch_template.foo" + resourceName := "aws_launch_template.test" rName := acctest.RandomWithPrefix("tf-acc-test") resource.ParallelTest(t, resource.TestCase{ @@ -312,15 +280,20 @@ func TestAccAWSLaunchTemplate_description(t *testing.T) { { Config: testAccAWSLaunchTemplateConfig_description(rName, "Test Description 1"), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLaunchTemplateExists(resName, &template), - resource.TestCheckResourceAttr(resName, "description", "Test Description 1"), + testAccCheckAWSLaunchTemplateExists(resourceName, &template), + resource.TestCheckResourceAttr(resourceName, "description", "Test Description 1"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSLaunchTemplateConfig_description(rName, "Test Description 2"), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLaunchTemplateExists(resName, &template), - resource.TestCheckResourceAttr(resName, "description", "Test Description 2"), + testAccCheckAWSLaunchTemplateExists(resourceName, &template), + resource.TestCheckResourceAttr(resourceName, "description", "Test Description 2"), ), }, }, @@ -329,7 +302,7 @@ func TestAccAWSLaunchTemplate_description(t *testing.T) { func TestAccAWSLaunchTemplate_update(t *testing.T) { var template ec2.LaunchTemplate - resName := "aws_launch_template.foo" + resourceName := "aws_launch_template.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -339,20 +312,26 @@ func TestAccAWSLaunchTemplate_update(t *testing.T) { { Config: testAccAWSLaunchTemplateConfig_asg_basic, Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLaunchTemplateExists(resName, &template), - resource.TestCheckResourceAttr(resName, "default_version", "1"), - resource.TestCheckResourceAttr(resName, "latest_version", "1"), + testAccCheckAWSLaunchTemplateExists(resourceName, &template), + resource.TestCheckResourceAttr(resourceName, "default_version", "1"), + resource.TestCheckResourceAttr(resourceName, "latest_version", "1"), resource.TestCheckResourceAttr( "aws_autoscaling_group.bar", "launch_template.0.version", "1"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name_prefix"}, + }, { Config: testAccAWSLaunchTemplateConfig_asg_update, Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLaunchTemplateExists(resName, &template), - resource.TestCheckResourceAttr(resName, "default_version", "1"), - resource.TestCheckResourceAttr(resName, "latest_version", "2"), - resource.TestCheckResourceAttrSet(resName, "instance_type"), + testAccCheckAWSLaunchTemplateExists(resourceName, &template), + resource.TestCheckResourceAttr(resourceName, "default_version", "1"), + resource.TestCheckResourceAttr(resourceName, "latest_version", "2"), + resource.TestCheckResourceAttrSet(resourceName, "instance_type"), resource.TestCheckResourceAttr( "aws_autoscaling_group.bar", "launch_template.0.version", "2"), ), @@ -363,7 +342,7 @@ func TestAccAWSLaunchTemplate_update(t *testing.T) { func TestAccAWSLaunchTemplate_tags(t *testing.T) { var template ec2.LaunchTemplate - resName := "aws_launch_template.foo" + resourceName := "aws_launch_template.test" rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ @@ -374,15 +353,20 @@ func TestAccAWSLaunchTemplate_tags(t *testing.T) { { 
Config: testAccAWSLaunchTemplateConfig_basic(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLaunchTemplateExists(resName, &template), - testAccCheckTags(&template.Tags, "foo", "bar"), + testAccCheckAWSLaunchTemplateExists(resourceName, &template), + testAccCheckTags(&template.Tags, "test", "bar"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSLaunchTemplateConfig_tagsUpdate(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLaunchTemplateExists(resName, &template), - testAccCheckTags(&template.Tags, "foo", ""), + testAccCheckAWSLaunchTemplateExists(resourceName, &template), + testAccCheckTags(&template.Tags, "test", ""), testAccCheckTags(&template.Tags, "bar", "baz"), ), }, @@ -392,7 +376,7 @@ func TestAccAWSLaunchTemplate_tags(t *testing.T) { func TestAccAWSLaunchTemplate_capacityReservation_preference(t *testing.T) { var template ec2.LaunchTemplate - resName := "aws_launch_template.foo" + resourceName := "aws_launch_template.test" rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ @@ -403,16 +387,21 @@ func TestAccAWSLaunchTemplate_capacityReservation_preference(t *testing.T) { { Config: testAccAWSLaunchTemplateConfig_capacityReservation_preference(rInt, "open"), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLaunchTemplateExists(resName, &template), + testAccCheckAWSLaunchTemplateExists(resourceName, &template), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } func TestAccAWSLaunchTemplate_capacityReservation_target(t *testing.T) { var template ec2.LaunchTemplate - resName := "aws_launch_template.foo" + resourceName := "aws_launch_template.test" rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ @@ -423,9 +412,14 @@ func TestAccAWSLaunchTemplate_capacityReservation_target(t *testing.T) { { Config: testAccAWSLaunchTemplateConfig_capacityReservation_target(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLaunchTemplateExists(resName, &template), + testAccCheckAWSLaunchTemplateExists(resourceName, &template), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -433,7 +427,7 @@ func TestAccAWSLaunchTemplate_capacityReservation_target(t *testing.T) { func TestAccAWSLaunchTemplate_creditSpecification_nonBurstable(t *testing.T) { var template ec2.LaunchTemplate rName := acctest.RandomWithPrefix("tf-acc-test") - resName := "aws_launch_template.foo" + resourceName := "aws_launch_template.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -443,9 +437,15 @@ func TestAccAWSLaunchTemplate_creditSpecification_nonBurstable(t *testing.T) { { Config: testAccAWSLaunchTemplateConfig_creditSpecification(rName, "m1.small", "standard"), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLaunchTemplateExists(resName, &template), + testAccCheckAWSLaunchTemplateExists(resourceName, &template), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"credit_specification"}, + }, }, }) } @@ -453,7 +453,7 @@ func TestAccAWSLaunchTemplate_creditSpecification_nonBurstable(t *testing.T) { func TestAccAWSLaunchTemplate_creditSpecification_t2(t *testing.T) { var template ec2.LaunchTemplate rName := acctest.RandomWithPrefix("tf-acc-test") - resName := "aws_launch_template.foo" + resourceName := "aws_launch_template.test" resource.ParallelTest(t, 
resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -463,11 +463,16 @@ func TestAccAWSLaunchTemplate_creditSpecification_t2(t *testing.T) { { Config: testAccAWSLaunchTemplateConfig_creditSpecification(rName, "t2.micro", "unlimited"), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLaunchTemplateExists(resName, &template), - resource.TestCheckResourceAttr(resName, "credit_specification.#", "1"), - resource.TestCheckResourceAttr(resName, "credit_specification.0.cpu_credits", "unlimited"), + testAccCheckAWSLaunchTemplateExists(resourceName, &template), + resource.TestCheckResourceAttr(resourceName, "credit_specification.#", "1"), + resource.TestCheckResourceAttr(resourceName, "credit_specification.0.cpu_credits", "unlimited"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -475,7 +480,7 @@ func TestAccAWSLaunchTemplate_creditSpecification_t2(t *testing.T) { func TestAccAWSLaunchTemplate_creditSpecification_t3(t *testing.T) { var template ec2.LaunchTemplate rName := acctest.RandomWithPrefix("tf-acc-test") - resName := "aws_launch_template.foo" + resourceName := "aws_launch_template.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -485,11 +490,16 @@ func TestAccAWSLaunchTemplate_creditSpecification_t3(t *testing.T) { { Config: testAccAWSLaunchTemplateConfig_creditSpecification(rName, "t3.micro", "unlimited"), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLaunchTemplateExists(resName, &template), - resource.TestCheckResourceAttr(resName, "credit_specification.#", "1"), - resource.TestCheckResourceAttr(resName, "credit_specification.0.cpu_credits", "unlimited"), + testAccCheckAWSLaunchTemplateExists(resourceName, &template), + resource.TestCheckResourceAttr(resourceName, "credit_specification.#", "1"), + resource.TestCheckResourceAttr(resourceName, "credit_specification.0.cpu_credits", "unlimited"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -518,7 +528,7 @@ func TestAccAWSLaunchTemplate_IamInstanceProfile_EmptyConfigurationBlock(t *test func TestAccAWSLaunchTemplate_networkInterface(t *testing.T) { var template ec2.LaunchTemplate - resName := "aws_launch_template.test" + resourceName := "aws_launch_template.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -528,20 +538,25 @@ func TestAccAWSLaunchTemplate_networkInterface(t *testing.T) { { Config: testAccAWSLaunchTemplateConfig_networkInterface, Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLaunchTemplateExists(resName, &template), - resource.TestCheckResourceAttr(resName, "network_interfaces.#", "1"), - resource.TestCheckResourceAttrSet(resName, "network_interfaces.0.network_interface_id"), - resource.TestCheckResourceAttr(resName, "network_interfaces.0.associate_public_ip_address", "false"), - resource.TestCheckResourceAttr(resName, "network_interfaces.0.ipv4_address_count", "2"), + testAccCheckAWSLaunchTemplateExists(resourceName, &template), + resource.TestCheckResourceAttr(resourceName, "network_interfaces.#", "1"), + resource.TestCheckResourceAttrSet(resourceName, "network_interfaces.0.network_interface_id"), + resource.TestCheckResourceAttr(resourceName, "network_interfaces.0.associate_public_ip_address", "false"), + resource.TestCheckResourceAttr(resourceName, "network_interfaces.0.ipv4_address_count", "2"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, 
}, }) } func TestAccAWSLaunchTemplate_networkInterface_ipv6Addresses(t *testing.T) { var template ec2.LaunchTemplate - resName := "aws_launch_template.test" + resourceName := "aws_launch_template.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -551,18 +566,23 @@ func TestAccAWSLaunchTemplate_networkInterface_ipv6Addresses(t *testing.T) { { Config: testAccAWSLaunchTemplateConfig_networkInterface_ipv6Addresses, Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLaunchTemplateExists(resName, &template), - resource.TestCheckResourceAttr(resName, "network_interfaces.#", "1"), - resource.TestCheckResourceAttr(resName, "network_interfaces.0.ipv6_addresses.#", "2"), + testAccCheckAWSLaunchTemplateExists(resourceName, &template), + resource.TestCheckResourceAttr(resourceName, "network_interfaces.#", "1"), + resource.TestCheckResourceAttr(resourceName, "network_interfaces.0.ipv6_addresses.#", "2"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } func TestAccAWSLaunchTemplate_networkInterface_ipv6AddressCount(t *testing.T) { var template ec2.LaunchTemplate - resName := "aws_launch_template.foo" + resourceName := "aws_launch_template.test" rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ @@ -573,11 +593,16 @@ func TestAccAWSLaunchTemplate_networkInterface_ipv6AddressCount(t *testing.T) { { Config: testAccAWSLaunchTemplateConfig_ipv6_count(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLaunchTemplateExists(resName, &template), - resource.TestCheckResourceAttr(resName, "network_interfaces.#", "1"), - resource.TestCheckResourceAttr(resName, "network_interfaces.0.ipv6_address_count", "1"), + testAccCheckAWSLaunchTemplateExists(resourceName, &template), + resource.TestCheckResourceAttr(resourceName, "network_interfaces.#", "1"), + resource.TestCheckResourceAttr(resourceName, "network_interfaces.0.ipv6_address_count", "1"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -585,8 +610,8 @@ func TestAccAWSLaunchTemplate_networkInterface_ipv6AddressCount(t *testing.T) { func TestAccAWSLaunchTemplate_instanceMarketOptions(t *testing.T) { var template ec2.LaunchTemplate var group autoscaling.Group - templateName := "aws_launch_template.test" groupName := "aws_autoscaling_group.test" + resourceName := "aws_launch_template.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -596,21 +621,27 @@ func TestAccAWSLaunchTemplate_instanceMarketOptions(t *testing.T) { { Config: testAccAWSLaunchTemplateConfig_instanceMarketOptions_basic, Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLaunchTemplateExists(templateName, &template), + testAccCheckAWSLaunchTemplateExists(resourceName, &template), testAccCheckAWSAutoScalingGroupExists(groupName, &group), - resource.TestCheckResourceAttr(templateName, "instance_market_options.#", "1"), - resource.TestCheckResourceAttr(templateName, "instance_market_options.0.spot_options.#", "1"), + resource.TestCheckResourceAttr(resourceName, "instance_market_options.#", "1"), + resource.TestCheckResourceAttr(resourceName, "instance_market_options.0.spot_options.#", "1"), resource.TestCheckResourceAttr(groupName, "launch_template.#", "1"), resource.TestCheckResourceAttr(groupName, "launch_template.0.version", "1"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name_prefix", 
"instance_market_options"}, + }, { Config: testAccAWSLaunchTemplateConfig_instanceMarketOptions_update, Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLaunchTemplateExists(templateName, &template), + testAccCheckAWSLaunchTemplateExists(resourceName, &template), testAccCheckAWSAutoScalingGroupExists(groupName, &group), - resource.TestCheckResourceAttr(templateName, "instance_market_options.#", "1"), - resource.TestCheckResourceAttr(templateName, "instance_market_options.0.spot_options.#", "1"), + resource.TestCheckResourceAttr(resourceName, "instance_market_options.#", "1"), + resource.TestCheckResourceAttr(resourceName, "instance_market_options.0.spot_options.#", "1"), resource.TestCheckResourceAttr(groupName, "launch_template.#", "1"), resource.TestCheckResourceAttr(groupName, "launch_template.0.version", "2"), ), @@ -621,7 +652,7 @@ func TestAccAWSLaunchTemplate_instanceMarketOptions(t *testing.T) { func TestAccAWSLaunchTemplate_licenseSpecification(t *testing.T) { var template ec2.LaunchTemplate - resName := "aws_launch_template.example" + resourceName := "aws_launch_template.example" rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ @@ -632,10 +663,15 @@ func TestAccAWSLaunchTemplate_licenseSpecification(t *testing.T) { { Config: testAccAWSLaunchTemplateConfig_licenseSpecification(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLaunchTemplateExists(resName, &template), - resource.TestCheckResourceAttr(resName, "license_specification.#", "1"), + testAccCheckAWSLaunchTemplateExists(resourceName, &template), + resource.TestCheckResourceAttr(resourceName, "license_specification.#", "1"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -714,11 +750,11 @@ func testAccCheckAWSLaunchTemplateDisappears(launchTemplate *ec2.LaunchTemplate) func testAccAWSLaunchTemplateConfig_basic(rInt int) string { return fmt.Sprintf(` -resource "aws_launch_template" "foo" { - name = "foo_%d" +resource "aws_launch_template" "test" { + name = "test_%d" tags = { - foo = "bar" + test = "bar" } } `, rInt) @@ -726,8 +762,8 @@ resource "aws_launch_template" "foo" { func testAccAWSLaunchTemplateConfig_ipv6_count(rInt int) string { return fmt.Sprintf(` -resource "aws_launch_template" "foo" { - name = "set_ipv6_count_foo_%d" +resource "aws_launch_template" "test" { + name = "set_ipv6_count_test_%d" network_interfaces { ipv6_address_count = 1 @@ -860,8 +896,8 @@ resource "aws_launch_template" "test" { func testAccAWSLaunchTemplateConfig_data(rInt int) string { return fmt.Sprintf(` -resource "aws_launch_template" "foo" { - name = "foo_%d" +resource "aws_launch_template" "test" { + name = "test_%d" block_device_mappings { device_name = "test" @@ -923,8 +959,8 @@ resource "aws_launch_template" "foo" { func testAccAWSLaunchTemplateConfig_tagsUpdate(rInt int) string { return fmt.Sprintf(` -resource "aws_launch_template" "foo" { - name = "foo_%d" +resource "aws_launch_template" "test" { + name = "test_%d" tags = { bar = "baz" @@ -935,8 +971,8 @@ resource "aws_launch_template" "foo" { func testAccAWSLaunchTemplateConfig_capacityReservation_preference(rInt int, preference string) string { return fmt.Sprintf(` -resource "aws_launch_template" "foo" { - name = "foo_%d" +resource "aws_launch_template" "test" { + name = "test_%d" capacity_reservation_specification { capacity_reservation_preference = %q @@ -956,8 +992,8 @@ resource "aws_ec2_capacity_reservation" "test" { instance_type = "t2.micro" } -resource "aws_launch_template" "foo" { 
- name = "foo_%d" +resource "aws_launch_template" "test" { + name = "test_%d" capacity_reservation_specification { capacity_reservation_target { @@ -970,7 +1006,7 @@ resource "aws_launch_template" "foo" { func testAccAWSLaunchTemplateConfig_creditSpecification(rName, instanceType, cpuCredits string) string { return fmt.Sprintf(` -resource "aws_launch_template" "foo" { +resource "aws_launch_template" "test" { instance_type = %q name = %q @@ -999,7 +1035,7 @@ resource "aws_licensemanager_license_configuration" "example" { } resource "aws_launch_template" "example" { - name = "foo_%d" + name = "test_%d" license_specification { license_configuration_arn = "${aws_licensemanager_license_configuration.example.id}" @@ -1010,7 +1046,7 @@ resource "aws_launch_template" "example" { func testAccAWSLaunchTemplateConfig_description(rName, description string) string { return fmt.Sprintf(` -resource "aws_launch_template" "foo" { +resource "aws_launch_template" "test" { name = "%s" description = "%s" } @@ -1065,8 +1101,8 @@ data "aws_ami" "test_ami" { } } -resource "aws_launch_template" "foo" { - name_prefix = "foobar" +resource "aws_launch_template" "test" { + name_prefix = "testbar" image_id = "${data.aws_ami.test_ami.id}" instance_type = "t2.micro" } @@ -1079,8 +1115,8 @@ resource "aws_autoscaling_group" "bar" { max_size = 0 min_size = 0 launch_template { - id = "${aws_launch_template.foo.id}" - version = "${aws_launch_template.foo.latest_version}" + id = "${aws_launch_template.test.id}" + version = "${aws_launch_template.test.latest_version}" } } ` @@ -1096,8 +1132,8 @@ data "aws_ami" "test_ami" { } } -resource "aws_launch_template" "foo" { - name_prefix = "foobar" +resource "aws_launch_template" "test" { + name_prefix = "testbar" image_id = "${data.aws_ami.test_ami.id}" instance_type = "t2.nano" } @@ -1110,8 +1146,8 @@ resource "aws_autoscaling_group" "bar" { max_size = 0 min_size = 0 launch_template { - id = "${aws_launch_template.foo.id}" - version = "${aws_launch_template.foo.latest_version}" + id = "${aws_launch_template.test.id}" + version = "${aws_launch_template.test.latest_version}" } } ` From c21afb308f4c89a1bb03e9ca1cfa6e315b7789c0 Mon Sep 17 00:00:00 2001 From: Ryn Daniels Date: Fri, 25 Oct 2019 14:29:16 +0200 Subject: [PATCH 32/55] Import test refactor for EC2 instances --- aws/resource_aws_instance_test.go | 1424 +++++++++++++++++------------ 1 file changed, 863 insertions(+), 561 deletions(-) diff --git a/aws/resource_aws_instance_test.go b/aws/resource_aws_instance_test.go index b0e81032a57..4d1b89f503e 100644 --- a/aws/resource_aws_instance_test.go +++ b/aws/resource_aws_instance_test.go @@ -128,31 +128,10 @@ func TestFetchRootDevice(t *testing.T) { } } -func TestAccAWSInstance_importBasic(t *testing.T) { - resourceName := "aws_instance.foo" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckInstanceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccInstanceConfigVPC, - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"associate_public_ip_address", "user_data"}, - }, - }, - }) -} - -func TestAccAWSInstance_importInDefaultVpcBySgName(t *testing.T) { - resourceName := "aws_instance.foo" +func TestAccAWSInstance_inDefaultVpcBySgName(t *testing.T) { + resourceName := "aws_instance.test" rInt := acctest.RandInt() + var v ec2.Instance resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { 
testAccPreCheck(t) }, @@ -161,8 +140,11 @@ func TestAccAWSInstance_importInDefaultVpcBySgName(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccInstanceConfigInDefaultVpcBySgName(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckInstanceExists( + resourceName, &v), + ), }, - { ResourceName: resourceName, ImportState: true, @@ -172,9 +154,10 @@ func TestAccAWSInstance_importInDefaultVpcBySgName(t *testing.T) { }) } -func TestAccAWSInstance_importInDefaultVpcBySgId(t *testing.T) { - resourceName := "aws_instance.foo" +func TestAccAWSInstance_inDefaultVpcBySgId(t *testing.T) { + resourceName := "aws_instance.test" rInt := acctest.RandInt() + var v ec2.Instance resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -183,8 +166,11 @@ func TestAccAWSInstance_importInDefaultVpcBySgId(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccInstanceConfigInDefaultVpcBySgId(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckInstanceExists( + resourceName, &v), + ), }, - { ResourceName: resourceName, ImportState: true, @@ -194,9 +180,10 @@ func TestAccAWSInstance_importInDefaultVpcBySgId(t *testing.T) { }) } -func TestAccAWSInstance_importInEc2Classic(t *testing.T) { - resourceName := "aws_instance.foo" +func TestAccAWSInstance_inEc2Classic(t *testing.T) { + resourceName := "aws_instance.test" rInt := acctest.RandInt() + var v ec2.Instance // EC2 Classic enabled oldvar := os.Getenv("AWS_DEFAULT_REGION") @@ -210,8 +197,11 @@ func TestAccAWSInstance_importInEc2Classic(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccInstanceConfigInEc2Classic(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckInstanceExists( + resourceName, &v), + ), }, - { Config: testAccInstanceConfigInEc2Classic(rInt), ResourceName: resourceName, @@ -226,7 +216,7 @@ func TestAccAWSInstance_importInEc2Classic(t *testing.T) { func TestAccAWSInstance_basic(t *testing.T) { var v ec2.Instance var vol *ec2.Volume - + resourceName := "aws_instance.test" rInt := acctest.RandInt() testCheck := func(rInt int) func(*terraform.State) error { @@ -248,7 +238,7 @@ func TestAccAWSInstance_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_instance.foo", + IDRefreshName: resourceName, Providers: testAccProviders, CheckDestroy: testAccCheckInstanceDestroy, Steps: []resource.TestStep{ @@ -266,26 +256,29 @@ func TestAccAWSInstance_basic(t *testing.T) { return err }, }, - { Config: testAccInstanceConfig(rInt), Check: resource.ComposeTestCheckFunc( testAccCheckInstanceExists( - "aws_instance.foo", &v), + resourceName, &v), testCheck(rInt), resource.TestCheckResourceAttr( - "aws_instance.foo", + resourceName, "user_data", "3dc39dda39be1205215e776bad998da361a5955d"), resource.TestCheckResourceAttr( - "aws_instance.foo", "ebs_block_device.#", "0"), + resourceName, "ebs_block_device.#", "0"), resource.TestMatchResourceAttr( - "aws_instance.foo", + resourceName, "arn", regexp.MustCompile(`^arn:[^:]+:ec2:[^:]+:\d{12}:instance/i-.+`)), ), }, - + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, // We repeat the exact same test so that we can be sure // that the user data hash stuff is working without generating // an incorrect diff. 
@@ -293,17 +286,16 @@ func TestAccAWSInstance_basic(t *testing.T) { Config: testAccInstanceConfig(rInt), Check: resource.ComposeTestCheckFunc( testAccCheckInstanceExists( - "aws_instance.foo", &v), + resourceName, &v), testCheck(rInt), resource.TestCheckResourceAttr( - "aws_instance.foo", + resourceName, "user_data", "3dc39dda39be1205215e776bad998da361a5955d"), resource.TestCheckResourceAttr( - "aws_instance.foo", "ebs_block_device.#", "0"), + resourceName, "ebs_block_device.#", "0"), ), }, - // Clean up volume created above { Config: testAccInstanceConfig(rInt), @@ -319,8 +311,8 @@ func TestAccAWSInstance_basic(t *testing.T) { func TestAccAWSInstance_EbsBlockDevice_KmsKeyArn(t *testing.T) { var instance ec2.Instance - kmsKeyResourceName := "aws_kms_key.foo" - resourceName := "aws_instance.foo" + kmsKeyResourceName := "aws_kms_key.test" + resourceName := "aws_instance.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -342,8 +334,8 @@ func TestAccAWSInstance_EbsBlockDevice_KmsKeyArn(t *testing.T) { func TestAccAWSInstance_RootBlockDevice_KmsKeyArn(t *testing.T) { var instance ec2.Instance - kmsKeyResourceName := "aws_kms_key.foo" - resourceName := "aws_instance.foo" + kmsKeyResourceName := "aws_kms_key.test" + resourceName := "aws_instance.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -359,18 +351,23 @@ func TestAccAWSInstance_RootBlockDevice_KmsKeyArn(t *testing.T) { resource.TestCheckResourceAttrPair(resourceName, "root_block_device.0.kms_key_id", kmsKeyResourceName, "arn"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } func TestAccAWSInstance_userDataBase64(t *testing.T) { var v ec2.Instance - + resourceName := "aws_instance.test" rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_instance.foo", + IDRefreshName: resourceName, Providers: testAccProviders, CheckDestroy: testAccCheckInstanceDestroy, Steps: []resource.TestStep{ @@ -378,19 +375,26 @@ func TestAccAWSInstance_userDataBase64(t *testing.T) { Config: testAccInstanceConfigWithUserDataBase64(rInt), Check: resource.ComposeTestCheckFunc( testAccCheckInstanceExists( - "aws_instance.foo", &v), + resourceName, &v), resource.TestCheckResourceAttr( - "aws_instance.foo", + resourceName, "user_data_base64", "aGVsbG8gd29ybGQ="), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"user_data"}, + }, }, }) } func TestAccAWSInstance_GP2IopsDevice(t *testing.T) { var v ec2.Instance + resourceName := "aws_instance.test" testCheck := func() resource.TestCheckFunc { return func(*terraform.State) error { @@ -412,7 +416,7 @@ func TestAccAWSInstance_GP2IopsDevice(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_instance.foo", + IDRefreshName: resourceName, IDRefreshIgnore: []string{"ephemeral_block_device", "user_data"}, Providers: testAccProviders, CheckDestroy: testAccCheckInstanceDestroy, @@ -422,34 +426,46 @@ func TestAccAWSInstance_GP2IopsDevice(t *testing.T) { //Config: testAccInstanceConfigBlockDevices, Check: resource.ComposeTestCheckFunc( testAccCheckInstanceExists( - "aws_instance.foo", &v), + resourceName, &v), resource.TestCheckResourceAttr( - "aws_instance.foo", "root_block_device.#", "1"), + resourceName, "root_block_device.#", "1"), resource.TestCheckResourceAttr( - 
"aws_instance.foo", "root_block_device.0.volume_size", "11"), + resourceName, "root_block_device.0.volume_size", "11"), resource.TestCheckResourceAttr( - "aws_instance.foo", "root_block_device.0.volume_type", "gp2"), + resourceName, "root_block_device.0.volume_type", "gp2"), resource.TestCheckResourceAttr( - "aws_instance.foo", "root_block_device.0.iops", "100"), + resourceName, "root_block_device.0.iops", "100"), testCheck(), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } func TestAccAWSInstance_GP2WithIopsValue(t *testing.T) { var v ec2.Instance + resourceName := "aws_instance.test" + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_instance.foo", + IDRefreshName: resourceName, IDRefreshIgnore: []string{"ephemeral_block_device", "user_data"}, Providers: testAccProviders, CheckDestroy: testAccCheckInstanceDestroy, Steps: []resource.TestStep{ { Config: testAccInstanceGP2WithIopsValue, - Check: testAccCheckInstanceExists("aws_instance.foo", &v), + Check: testAccCheckInstanceExists(resourceName, &v), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, }, { Config: testAccInstanceGP2WithIopsValue, @@ -462,6 +478,7 @@ func TestAccAWSInstance_GP2WithIopsValue(t *testing.T) { func TestAccAWSInstance_blockDevices(t *testing.T) { var v ec2.Instance + resourceName := "aws_instance.test" testCheck := func() resource.TestCheckFunc { return func(*terraform.State) error { @@ -498,7 +515,7 @@ func TestAccAWSInstance_blockDevices(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_instance.foo", + IDRefreshName: resourceName, IDRefreshIgnore: []string{"ephemeral_block_device"}, Providers: testAccProviders, CheckDestroy: testAccCheckInstanceDestroy, @@ -507,68 +524,75 @@ func TestAccAWSInstance_blockDevices(t *testing.T) { Config: testAccInstanceConfigBlockDevices, Check: resource.ComposeTestCheckFunc( testAccCheckInstanceExists( - "aws_instance.foo", &v), + resourceName, &v), resource.TestCheckResourceAttr( - "aws_instance.foo", "root_block_device.#", "1"), + resourceName, "root_block_device.#", "1"), resource.TestMatchResourceAttr( - "aws_instance.foo", "root_block_device.0.volume_id", regexp.MustCompile("vol-[a-z0-9]+")), + resourceName, "root_block_device.0.volume_id", regexp.MustCompile("vol-[a-z0-9]+")), resource.TestCheckResourceAttr( - "aws_instance.foo", "root_block_device.0.volume_size", "11"), + resourceName, "root_block_device.0.volume_size", "11"), resource.TestCheckResourceAttr( - "aws_instance.foo", "root_block_device.0.volume_type", "gp2"), + resourceName, "root_block_device.0.volume_type", "gp2"), resource.TestCheckResourceAttr( - "aws_instance.foo", "ebs_block_device.#", "3"), + resourceName, "ebs_block_device.#", "3"), resource.TestCheckResourceAttr( - "aws_instance.foo", "ebs_block_device.2576023345.device_name", "/dev/sdb"), + resourceName, "ebs_block_device.2576023345.device_name", "/dev/sdb"), resource.TestMatchResourceAttr( - "aws_instance.foo", "ebs_block_device.2576023345.volume_id", regexp.MustCompile("vol-[a-z0-9]+")), + resourceName, "ebs_block_device.2576023345.volume_id", regexp.MustCompile("vol-[a-z0-9]+")), resource.TestCheckResourceAttr( - "aws_instance.foo", "ebs_block_device.2576023345.volume_size", "9"), + resourceName, "ebs_block_device.2576023345.volume_size", "9"), resource.TestCheckResourceAttr( - "aws_instance.foo", 
"ebs_block_device.2576023345.volume_type", "gp2"), + resourceName, "ebs_block_device.2576023345.volume_type", "gp2"), resource.TestCheckResourceAttr( - "aws_instance.foo", "ebs_block_device.2554893574.device_name", "/dev/sdc"), + resourceName, "ebs_block_device.2554893574.device_name", "/dev/sdc"), resource.TestMatchResourceAttr( - "aws_instance.foo", "ebs_block_device.2554893574.volume_id", regexp.MustCompile("vol-[a-z0-9]+")), + resourceName, "ebs_block_device.2554893574.volume_id", regexp.MustCompile("vol-[a-z0-9]+")), resource.TestCheckResourceAttr( - "aws_instance.foo", "ebs_block_device.2554893574.volume_size", "10"), + resourceName, "ebs_block_device.2554893574.volume_size", "10"), resource.TestCheckResourceAttr( - "aws_instance.foo", "ebs_block_device.2554893574.volume_type", "io1"), + resourceName, "ebs_block_device.2554893574.volume_type", "io1"), resource.TestCheckResourceAttr( - "aws_instance.foo", "ebs_block_device.2554893574.iops", "100"), + resourceName, "ebs_block_device.2554893574.iops", "100"), resource.TestCheckResourceAttr( - "aws_instance.foo", "ebs_block_device.2634515331.device_name", "/dev/sdd"), + resourceName, "ebs_block_device.2634515331.device_name", "/dev/sdd"), resource.TestMatchResourceAttr( - "aws_instance.foo", "ebs_block_device.2634515331.volume_id", regexp.MustCompile("vol-[a-z0-9]+")), + resourceName, "ebs_block_device.2634515331.volume_id", regexp.MustCompile("vol-[a-z0-9]+")), resource.TestCheckResourceAttr( - "aws_instance.foo", "ebs_block_device.2634515331.encrypted", "true"), + resourceName, "ebs_block_device.2634515331.encrypted", "true"), resource.TestCheckResourceAttr( - "aws_instance.foo", "ebs_block_device.2634515331.volume_size", "12"), + resourceName, "ebs_block_device.2634515331.volume_size", "12"), resource.TestCheckResourceAttr( - "aws_instance.foo", "ephemeral_block_device.#", "1"), + resourceName, "ephemeral_block_device.#", "1"), resource.TestCheckResourceAttr( - "aws_instance.foo", "ephemeral_block_device.1692014856.device_name", "/dev/sde"), + resourceName, "ephemeral_block_device.1692014856.device_name", "/dev/sde"), resource.TestCheckResourceAttr( - "aws_instance.foo", "ephemeral_block_device.1692014856.virtual_name", "ephemeral0"), + resourceName, "ephemeral_block_device.1692014856.virtual_name", "ephemeral0"), testCheck(), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ephemeral_block_device"}, + }, }, }) } func TestAccAWSInstance_rootInstanceStore(t *testing.T) { var v ec2.Instance + resourceName := "aws_instance.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_instance.foo", + IDRefreshName: resourceName, Providers: testAccProviders, CheckDestroy: testAccCheckInstanceDestroy, Steps: []resource.TestStep{ { Config: ` - resource "aws_instance" "foo" { + resource "aws_instance" "test" { # us-west-2 # Amazon Linux HVM Instance Store 64-bit (2016.09.0) # https://aws.amazon.com/amazon-linux-ami @@ -580,25 +604,31 @@ func TestAccAWSInstance_rootInstanceStore(t *testing.T) { }`, Check: resource.ComposeTestCheckFunc( testAccCheckInstanceExists( - "aws_instance.foo", &v), + resourceName, &v), resource.TestCheckResourceAttr( - "aws_instance.foo", "ami", "ami-44c36524"), + resourceName, "ami", "ami-44c36524"), resource.TestCheckResourceAttr( - "aws_instance.foo", "ebs_block_device.#", "0"), + resourceName, "ebs_block_device.#", "0"), resource.TestCheckResourceAttr( - "aws_instance.foo", 
"ebs_optimized", "false"), + resourceName, "ebs_optimized", "false"), resource.TestCheckResourceAttr( - "aws_instance.foo", "instance_type", "m3.medium"), + resourceName, "instance_type", "m3.medium"), resource.TestCheckResourceAttr( - "aws_instance.foo", "root_block_device.#", "0"), + resourceName, "root_block_device.#", "0"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } func TestAccAWSInstance_noAMIEphemeralDevices(t *testing.T) { var v ec2.Instance + resourceName := "aws_instance.test" testCheck := func() resource.TestCheckFunc { return func(*terraform.State) error { @@ -629,14 +659,14 @@ func TestAccAWSInstance_noAMIEphemeralDevices(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_instance.foo", + IDRefreshName: resourceName, IDRefreshIgnore: []string{"ephemeral_block_device"}, Providers: testAccProviders, CheckDestroy: testAccCheckInstanceDestroy, Steps: []resource.TestStep{ { Config: ` - resource "aws_instance" "foo" { + resource "aws_instance" "test" { # us-west-2 ami = "ami-01f05461" // This AMI (Ubuntu) contains two ephemerals @@ -657,40 +687,47 @@ func TestAccAWSInstance_noAMIEphemeralDevices(t *testing.T) { }`, Check: resource.ComposeTestCheckFunc( testAccCheckInstanceExists( - "aws_instance.foo", &v), + resourceName, &v), resource.TestCheckResourceAttr( - "aws_instance.foo", "ami", "ami-01f05461"), + resourceName, "ami", "ami-01f05461"), resource.TestCheckResourceAttr( - "aws_instance.foo", "ebs_optimized", "false"), + resourceName, "ebs_optimized", "false"), resource.TestCheckResourceAttr( - "aws_instance.foo", "instance_type", "c3.large"), + resourceName, "instance_type", "c3.large"), resource.TestCheckResourceAttr( - "aws_instance.foo", "root_block_device.#", "1"), + resourceName, "root_block_device.#", "1"), resource.TestCheckResourceAttr( - "aws_instance.foo", "root_block_device.0.volume_size", "11"), + resourceName, "root_block_device.0.volume_size", "11"), resource.TestCheckResourceAttr( - "aws_instance.foo", "root_block_device.0.volume_type", "gp2"), + resourceName, "root_block_device.0.volume_type", "gp2"), resource.TestCheckResourceAttr( - "aws_instance.foo", "ebs_block_device.#", "0"), + resourceName, "ebs_block_device.#", "0"), resource.TestCheckResourceAttr( - "aws_instance.foo", "ephemeral_block_device.#", "2"), + resourceName, "ephemeral_block_device.#", "2"), resource.TestCheckResourceAttr( - "aws_instance.foo", "ephemeral_block_device.172787947.device_name", "/dev/sdb"), + resourceName, "ephemeral_block_device.172787947.device_name", "/dev/sdb"), resource.TestCheckResourceAttr( - "aws_instance.foo", "ephemeral_block_device.172787947.no_device", "true"), + resourceName, "ephemeral_block_device.172787947.no_device", "true"), resource.TestCheckResourceAttr( - "aws_instance.foo", "ephemeral_block_device.3336996981.device_name", "/dev/sdc"), + resourceName, "ephemeral_block_device.3336996981.device_name", "/dev/sdc"), resource.TestCheckResourceAttr( - "aws_instance.foo", "ephemeral_block_device.3336996981.no_device", "true"), + resourceName, "ephemeral_block_device.3336996981.no_device", "true"), testCheck(), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ephemeral_block_device"}, + }, }, }) } func TestAccAWSInstance_sourceDestCheck(t *testing.T) { var v ec2.Instance + resourceName := "aws_instance.test" testCheck := func(enabled bool) resource.TestCheckFunc { 
return func(*terraform.State) error { @@ -707,30 +744,33 @@ func TestAccAWSInstance_sourceDestCheck(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_instance.foo", + IDRefreshName: resourceName, Providers: testAccProviders, CheckDestroy: testAccCheckInstanceDestroy, Steps: []resource.TestStep{ { Config: testAccInstanceConfigSourceDestDisable, Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &v), + testAccCheckInstanceExists(resourceName, &v), testCheck(false), ), }, - + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccInstanceConfigSourceDestEnable, Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &v), + testAccCheckInstanceExists(resourceName, &v), testCheck(true), ), }, - { Config: testAccInstanceConfigSourceDestDisable, Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &v), + testAccCheckInstanceExists(resourceName, &v), testCheck(false), ), }, @@ -740,6 +780,7 @@ func TestAccAWSInstance_sourceDestCheck(t *testing.T) { func TestAccAWSInstance_disableApiTermination(t *testing.T) { var v ec2.Instance + resourceName := "aws_instance.test" checkDisableApiTermination := func(expected bool) resource.TestCheckFunc { return func(*terraform.State) error { @@ -761,22 +802,26 @@ func TestAccAWSInstance_disableApiTermination(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_instance.foo", + IDRefreshName: resourceName, Providers: testAccProviders, CheckDestroy: testAccCheckInstanceDestroy, Steps: []resource.TestStep{ { Config: testAccInstanceConfigDisableAPITermination(true), Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &v), + testAccCheckInstanceExists(resourceName, &v), checkDisableApiTermination(true), ), }, - + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccInstanceConfigDisableAPITermination(false), Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &v), + testAccCheckInstanceExists(resourceName, &v), checkDisableApiTermination(false), ), }, @@ -786,10 +831,11 @@ func TestAccAWSInstance_disableApiTermination(t *testing.T) { func TestAccAWSInstance_vpc(t *testing.T) { var v ec2.Instance + resourceName := "aws_instance.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_instance.foo", + IDRefreshName: resourceName, IDRefreshIgnore: []string{"associate_public_ip_address"}, Providers: testAccProviders, CheckDestroy: testAccCheckInstanceDestroy, @@ -798,13 +844,19 @@ func TestAccAWSInstance_vpc(t *testing.T) { Config: testAccInstanceConfigVPC, Check: resource.ComposeTestCheckFunc( testAccCheckInstanceExists( - "aws_instance.foo", &v), + resourceName, &v), resource.TestCheckResourceAttr( - "aws_instance.foo", + resourceName, "user_data", "562a3e32810edf6ff09994f050f12e799452379d"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"associate_public_ip_address", "user_data"}, + }, }, }) } @@ -812,10 +864,11 @@ func TestAccAWSInstance_vpc(t *testing.T) { func TestAccAWSInstance_placementGroup(t *testing.T) { var v ec2.Instance rStr := acctest.RandString(5) + resourceName := "aws_instance.test" resource.ParallelTest(t, 
resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_instance.foo", + IDRefreshName: resourceName, IDRefreshIgnore: []string{"associate_public_ip_address"}, Providers: testAccProviders, CheckDestroy: testAccCheckInstanceDestroy, @@ -824,19 +877,25 @@ func TestAccAWSInstance_placementGroup(t *testing.T) { Config: testAccInstanceConfigPlacementGroup(rStr), Check: resource.ComposeTestCheckFunc( testAccCheckInstanceExists( - "aws_instance.foo", &v), + resourceName, &v), resource.TestCheckResourceAttr( - "aws_instance.foo", + resourceName, "placement_group", fmt.Sprintf("testAccInstanceConfigPlacementGroup_%s", rStr)), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } func TestAccAWSInstance_ipv6_supportAddressCount(t *testing.T) { var v ec2.Instance + resourceName := "aws_instance.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -847,13 +906,18 @@ func TestAccAWSInstance_ipv6_supportAddressCount(t *testing.T) { Config: testAccInstanceConfigIpv6Support, Check: resource.ComposeTestCheckFunc( testAccCheckInstanceExists( - "aws_instance.foo", &v), + resourceName, &v), resource.TestCheckResourceAttr( - "aws_instance.foo", + resourceName, "ipv6_address_count", "1"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -875,6 +939,7 @@ func TestAccAWSInstance_ipv6AddressCountAndSingleAddressCausesError(t *testing.T func TestAccAWSInstance_ipv6_supportAddressCountWithIpv4(t *testing.T) { var v ec2.Instance + resourceName := "aws_instance.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -885,19 +950,25 @@ func TestAccAWSInstance_ipv6_supportAddressCountWithIpv4(t *testing.T) { Config: testAccInstanceConfigIpv6SupportWithIpv4, Check: resource.ComposeTestCheckFunc( testAccCheckInstanceExists( - "aws_instance.foo", &v), + resourceName, &v), resource.TestCheckResourceAttr( - "aws_instance.foo", + resourceName, "ipv6_address_count", "1"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } func TestAccAWSInstance_multipleRegions(t *testing.T) { var v ec2.Instance + resourceName := "aws_instance.test" // record the initialized providers so that we can use them to // check for the instances in each region @@ -911,9 +982,9 @@ func TestAccAWSInstance_multipleRegions(t *testing.T) { { Config: testAccInstanceConfigMultipleRegions, Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExistsWithProvider("aws_instance.foo", &v, + testAccCheckInstanceExistsWithProvider(resourceName, &v, testAccAwsRegionProviderFunc("us-west-2", &providers)), - testAccCheckInstanceExistsWithProvider("aws_instance.bar", &v, + testAccCheckInstanceExistsWithProvider("aws_instance.test2", &v, testAccAwsRegionProviderFunc("us-east-1", &providers)), ), }, @@ -923,12 +994,12 @@ func TestAccAWSInstance_multipleRegions(t *testing.T) { func TestAccAWSInstance_NetworkInstanceSecurityGroups(t *testing.T) { var v ec2.Instance - + resourceName := "aws_instance.test" rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_instance.foo_instance", + IDRefreshName: resourceName, IDRefreshIgnore: []string{"associate_public_ip_address"}, Providers: testAccProviders, CheckDestroy: testAccCheckInstanceDestroy, @@ -937,21 +1008,26 @@ func TestAccAWSInstance_NetworkInstanceSecurityGroups(t *testing.T) { Config: 
testAccInstanceNetworkInstanceSecurityGroups(rInt), Check: resource.ComposeTestCheckFunc( testAccCheckInstanceExists( - "aws_instance.foo_instance", &v), + resourceName, &v), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } func TestAccAWSInstance_NetworkInstanceRemovingAllSecurityGroups(t *testing.T) { var v ec2.Instance - + resourceName := "aws_instance.test" rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_instance.foo_instance", + IDRefreshName: resourceName, Providers: testAccProviders, CheckDestroy: testAccCheckInstanceDestroy, Steps: []resource.TestStep{ @@ -959,22 +1035,27 @@ func TestAccAWSInstance_NetworkInstanceRemovingAllSecurityGroups(t *testing.T) { Config: testAccInstanceNetworkInstanceVPCSecurityGroupIDs(rInt), Check: resource.ComposeTestCheckFunc( testAccCheckInstanceExists( - "aws_instance.foo_instance", &v), + resourceName, &v), resource.TestCheckResourceAttr( - "aws_instance.foo_instance", "security_groups.#", "0"), + resourceName, "security_groups.#", "0"), resource.TestCheckResourceAttr( - "aws_instance.foo_instance", "vpc_security_group_ids.#", "1"), + resourceName, "vpc_security_group_ids.#", "1"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccInstanceNetworkInstanceVPCRemoveSecurityGroupIDs(rInt), Check: resource.ComposeTestCheckFunc( testAccCheckInstanceExists( - "aws_instance.foo_instance", &v), + resourceName, &v), resource.TestCheckResourceAttr( - "aws_instance.foo_instance", "security_groups.#", "0"), + resourceName, "security_groups.#", "0"), resource.TestCheckResourceAttr( - "aws_instance.foo_instance", "vpc_security_group_ids.#", "1"), + resourceName, "vpc_security_group_ids.#", "1"), ), ExpectError: regexp.MustCompile(`VPC-based instances require at least one security group to be attached`), }, @@ -984,12 +1065,12 @@ func TestAccAWSInstance_NetworkInstanceRemovingAllSecurityGroups(t *testing.T) { func TestAccAWSInstance_NetworkInstanceVPCSecurityGroupIDs(t *testing.T) { var v ec2.Instance - + resourceName := "aws_instance.test" rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_instance.foo_instance", + IDRefreshName: resourceName, Providers: testAccProviders, CheckDestroy: testAccCheckInstanceDestroy, Steps: []resource.TestStep{ @@ -997,19 +1078,25 @@ func TestAccAWSInstance_NetworkInstanceVPCSecurityGroupIDs(t *testing.T) { Config: testAccInstanceNetworkInstanceVPCSecurityGroupIDs(rInt), Check: resource.ComposeTestCheckFunc( testAccCheckInstanceExists( - "aws_instance.foo_instance", &v), + resourceName, &v), resource.TestCheckResourceAttr( - "aws_instance.foo_instance", "security_groups.#", "0"), + resourceName, "security_groups.#", "0"), resource.TestCheckResourceAttr( - "aws_instance.foo_instance", "vpc_security_group_ids.#", "1"), + resourceName, "vpc_security_group_ids.#", "1"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } func TestAccAWSInstance_tags(t *testing.T) { var v ec2.Instance + resourceName := "aws_instance.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -1019,18 +1106,23 @@ func TestAccAWSInstance_tags(t *testing.T) { { Config: testAccCheckInstanceConfigTags, Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &v), - 
testAccCheckTags(&v.Tags, "foo", "bar"), + testAccCheckInstanceExists(resourceName, &v), + testAccCheckTags(&v.Tags, "test", "test2"), // Guard against regression of https://github.com/hashicorp/terraform/issues/914 testAccCheckTags(&v.Tags, "#", ""), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccCheckInstanceConfigTagsUpdate, Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &v), - testAccCheckTags(&v.Tags, "foo", ""), - testAccCheckTags(&v.Tags, "bar", "baz"), + testAccCheckInstanceExists(resourceName, &v), + testAccCheckTags(&v.Tags, "test", ""), + testAccCheckTags(&v.Tags, "test2", "test3"), ), }, }, @@ -1039,6 +1131,7 @@ func TestAccAWSInstance_tags(t *testing.T) { func TestAccAWSInstance_volumeTags(t *testing.T) { var v ec2.Instance + resourceName := "aws_instance.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -1048,39 +1141,45 @@ func TestAccAWSInstance_volumeTags(t *testing.T) { { Config: testAccCheckInstanceConfigNoVolumeTags, Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &v), + testAccCheckInstanceExists(resourceName, &v), resource.TestCheckNoResourceAttr( - "aws_instance.foo", "volume_tags"), + resourceName, "volume_tags"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ephemeral_block_device"}, + }, { Config: testAccCheckInstanceConfigWithVolumeTags, Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &v), + testAccCheckInstanceExists(resourceName, &v), resource.TestCheckResourceAttr( - "aws_instance.foo", "volume_tags.%", "1"), + resourceName, "volume_tags.%", "1"), resource.TestCheckResourceAttr( - "aws_instance.foo", "volume_tags.Name", "acceptance-test-volume-tag"), + resourceName, "volume_tags.Name", "acceptance-test-volume-tag"), ), }, { Config: testAccCheckInstanceConfigWithVolumeTagsUpdate, Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &v), + testAccCheckInstanceExists(resourceName, &v), resource.TestCheckResourceAttr( - "aws_instance.foo", "volume_tags.%", "2"), + resourceName, "volume_tags.%", "2"), resource.TestCheckResourceAttr( - "aws_instance.foo", "volume_tags.Name", "acceptance-test-volume-tag"), + resourceName, "volume_tags.Name", "acceptance-test-volume-tag"), resource.TestCheckResourceAttr( - "aws_instance.foo", "volume_tags.Environment", "dev"), + resourceName, "volume_tags.Environment", "dev"), ), }, { Config: testAccCheckInstanceConfigNoVolumeTags, Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &v), + testAccCheckInstanceExists(resourceName, &v), resource.TestCheckNoResourceAttr( - "aws_instance.foo", "volume_tags"), + resourceName, "volume_tags"), ), }, }, @@ -1089,6 +1188,7 @@ func TestAccAWSInstance_volumeTags(t *testing.T) { func TestAccAWSInstance_volumeTagsComputed(t *testing.T) { var v ec2.Instance + resourceName := "aws_instance.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -1098,9 +1198,14 @@ func TestAccAWSInstance_volumeTagsComputed(t *testing.T) { { Config: testAccCheckInstanceConfigWithAttachedVolume, Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &v), + testAccCheckInstanceExists(resourceName, &v), ), }, + { + ResourceName: resourceName, + ImportState: true, + 
ImportStateVerify: true, + }, }, }) } @@ -1108,6 +1213,7 @@ func TestAccAWSInstance_volumeTagsComputed(t *testing.T) { func TestAccAWSInstance_instanceProfileChange(t *testing.T) { var v ec2.Instance rName := acctest.RandString(5) + resourceName := "aws_instance.test" testCheckInstanceProfile := func() resource.TestCheckFunc { return func(*terraform.State) error { @@ -1121,20 +1227,25 @@ func TestAccAWSInstance_instanceProfileChange(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_instance.foo", + IDRefreshName: resourceName, Providers: testAccProviders, CheckDestroy: testAccCheckInstanceDestroy, Steps: []resource.TestStep{ { Config: testAccInstanceConfigWithoutInstanceProfile(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &v), + testAccCheckInstanceExists(resourceName, &v), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccInstanceConfigWithInstanceProfile(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &v), + testAccCheckInstanceExists(resourceName, &v), testCheckInstanceProfile(), ), }, @@ -1145,6 +1256,7 @@ func TestAccAWSInstance_instanceProfileChange(t *testing.T) { func TestAccAWSInstance_withIamInstanceProfile(t *testing.T) { var v ec2.Instance rName := acctest.RandString(5) + resourceName := "aws_instance.test" testCheckInstanceProfile := func() resource.TestCheckFunc { return func(*terraform.State) error { @@ -1158,23 +1270,29 @@ func TestAccAWSInstance_withIamInstanceProfile(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_instance.foo", + IDRefreshName: resourceName, Providers: testAccProviders, CheckDestroy: testAccCheckInstanceDestroy, Steps: []resource.TestStep{ { Config: testAccInstanceConfigWithInstanceProfile(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &v), + testAccCheckInstanceExists(resourceName, &v), testCheckInstanceProfile(), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } func TestAccAWSInstance_privateIP(t *testing.T) { var v ec2.Instance + resourceName := "aws_instance.test" testCheckPrivateIP := func() resource.TestCheckFunc { return func(*terraform.State) error { @@ -1188,23 +1306,29 @@ func TestAccAWSInstance_privateIP(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_instance.foo", + IDRefreshName: resourceName, Providers: testAccProviders, CheckDestroy: testAccCheckInstanceDestroy, Steps: []resource.TestStep{ { Config: testAccInstanceConfigPrivateIP, Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &v), + testAccCheckInstanceExists(resourceName, &v), testCheckPrivateIP(), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } func TestAccAWSInstance_associatePublicIPAndPrivateIP(t *testing.T) { var v ec2.Instance + resourceName := "aws_instance.test" testCheckPrivateIP := func() resource.TestCheckFunc { return func(*terraform.State) error { @@ -1218,7 +1342,7 @@ func TestAccAWSInstance_associatePublicIPAndPrivateIP(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_instance.foo", + IDRefreshName: resourceName, IDRefreshIgnore: 
[]string{"associate_public_ip_address"}, Providers: testAccProviders, CheckDestroy: testAccCheckInstanceDestroy, @@ -1226,18 +1350,22 @@ func TestAccAWSInstance_associatePublicIPAndPrivateIP(t *testing.T) { { Config: testAccInstanceConfigAssociatePublicIPAndPrivateIP, Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &v), + testAccCheckInstanceExists(resourceName, &v), testCheckPrivateIP(), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } -// Guard against regression with KeyPairs -// https://github.com/hashicorp/terraform/issues/2302 func TestAccAWSInstance_keyPairCheck(t *testing.T) { var v ec2.Instance + resourceName := "aws_instance.test" testCheckKeyPair := func(keyName string) resource.TestCheckFunc { return func(*terraform.State) error { @@ -1256,7 +1384,7 @@ func TestAccAWSInstance_keyPairCheck(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_instance.foo", + IDRefreshName: resourceName, IDRefreshIgnore: []string{"source_dest_check"}, Providers: testAccProviders, CheckDestroy: testAccCheckInstanceDestroy, @@ -1264,7 +1392,7 @@ func TestAccAWSInstance_keyPairCheck(t *testing.T) { { Config: testAccInstanceConfigKeyPair(keyPairName), Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &v), + testAccCheckInstanceExists(resourceName, &v), testCheckKeyPair(keyPairName), ), }, @@ -1274,6 +1402,7 @@ func TestAccAWSInstance_keyPairCheck(t *testing.T) { func TestAccAWSInstance_rootBlockDeviceMismatch(t *testing.T) { var v ec2.Instance + resourceName := "aws_instance.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -1283,11 +1412,17 @@ func TestAccAWSInstance_rootBlockDeviceMismatch(t *testing.T) { { Config: testAccInstanceConfigRootBlockDeviceMismatch, Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &v), + testAccCheckInstanceExists(resourceName, &v), resource.TestCheckResourceAttr( - "aws_instance.foo", "root_block_device.0.volume_size", "13"), + resourceName, "root_block_device.0.volume_size", "13"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"root_block_device"}, + }, }, }) } @@ -1303,17 +1438,18 @@ func TestAccAWSInstance_rootBlockDeviceMismatch(t *testing.T) { // set NewRemoved on the .# field when it changes to 0. 
func TestAccAWSInstance_forceNewAndTagsDrift(t *testing.T) { var v ec2.Instance + resourceName := "aws_instance.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_instance.foo", + IDRefreshName: resourceName, Providers: testAccProviders, CheckDestroy: testAccCheckInstanceDestroy, Steps: []resource.TestStep{ { Config: testAccInstanceConfigForceNewAndTagsDrift, Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &v), + testAccCheckInstanceExists(resourceName, &v), driftTags(&v), ), ExpectNonEmptyPlan: true, @@ -1321,9 +1457,14 @@ func TestAccAWSInstance_forceNewAndTagsDrift(t *testing.T) { { Config: testAccInstanceConfigForceNewAndTagsDrift_Update, Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &v), + testAccCheckInstanceExists(resourceName, &v), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -1331,6 +1472,7 @@ func TestAccAWSInstance_forceNewAndTagsDrift(t *testing.T) { func TestAccAWSInstance_changeInstanceType(t *testing.T) { var before ec2.Instance var after ec2.Instance + resourceName := "aws_instance.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -1340,13 +1482,18 @@ func TestAccAWSInstance_changeInstanceType(t *testing.T) { { Config: testAccInstanceConfigWithSmallInstanceType, Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &before), + testAccCheckInstanceExists(resourceName, &before), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccInstanceConfigUpdateInstanceType, Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &after), + testAccCheckInstanceExists(resourceName, &after), testAccCheckInstanceNotRecreated( t, &before, &after), ), @@ -1358,6 +1505,7 @@ func TestAccAWSInstance_changeInstanceType(t *testing.T) { func TestAccAWSInstance_primaryNetworkInterface(t *testing.T) { var instance ec2.Instance var ini ec2.NetworkInterface + resourceName := "aws_instance.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -1367,11 +1515,17 @@ func TestAccAWSInstance_primaryNetworkInterface(t *testing.T) { { Config: testAccInstanceConfigPrimaryNetworkInterface, Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &instance), - testAccCheckAWSENIExists("aws_network_interface.bar", &ini), - resource.TestCheckResourceAttr("aws_instance.foo", "network_interface.#", "1"), + testAccCheckInstanceExists(resourceName, &instance), + testAccCheckAWSENIExists("aws_network_interface.test", &ini), + resource.TestCheckResourceAttr(resourceName, "network_interface.#", "1"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"network_interface"}, + }, }, }) } @@ -1379,6 +1533,7 @@ func TestAccAWSInstance_primaryNetworkInterface(t *testing.T) { func TestAccAWSInstance_primaryNetworkInterfaceSourceDestCheck(t *testing.T) { var instance ec2.Instance var ini ec2.NetworkInterface + resourceName := "aws_instance.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -1388,11 +1543,17 @@ func TestAccAWSInstance_primaryNetworkInterfaceSourceDestCheck(t *testing.T) { { Config: testAccInstanceConfigPrimaryNetworkInterfaceSourceDestCheck, Check: 
resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &instance), - testAccCheckAWSENIExists("aws_network_interface.bar", &ini), - resource.TestCheckResourceAttr("aws_instance.foo", "source_dest_check", "false"), + testAccCheckInstanceExists(resourceName, &instance), + testAccCheckAWSENIExists("aws_network_interface.test", &ini), + resource.TestCheckResourceAttr(resourceName, "source_dest_check", "false"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"network_interface"}, + }, }, }) } @@ -1402,6 +1563,7 @@ func TestAccAWSInstance_addSecondaryInterface(t *testing.T) { var after ec2.Instance var iniPrimary ec2.NetworkInterface var iniSecondary ec2.NetworkInterface + resourceName := "aws_instance.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -1411,17 +1573,23 @@ func TestAccAWSInstance_addSecondaryInterface(t *testing.T) { { Config: testAccInstanceConfigAddSecondaryNetworkInterfaceBefore, Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &before), + testAccCheckInstanceExists(resourceName, &before), testAccCheckAWSENIExists("aws_network_interface.primary", &iniPrimary), - resource.TestCheckResourceAttr("aws_instance.foo", "network_interface.#", "1"), + resource.TestCheckResourceAttr(resourceName, "network_interface.#", "1"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"network_interface"}, + }, { Config: testAccInstanceConfigAddSecondaryNetworkInterfaceAfter, Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &after), + testAccCheckInstanceExists(resourceName, &after), testAccCheckAWSENIExists("aws_network_interface.secondary", &iniSecondary), - resource.TestCheckResourceAttr("aws_instance.foo", "network_interface.#", "1"), + resource.TestCheckResourceAttr(resourceName, "network_interface.#", "1"), ), }, }, @@ -1432,6 +1600,7 @@ func TestAccAWSInstance_addSecondaryInterface(t *testing.T) { func TestAccAWSInstance_addSecurityGroupNetworkInterface(t *testing.T) { var before ec2.Instance var after ec2.Instance + resourceName := "aws_instance.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -1441,15 +1610,20 @@ func TestAccAWSInstance_addSecurityGroupNetworkInterface(t *testing.T) { { Config: testAccInstanceConfigAddSecurityGroupBefore, Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &before), - resource.TestCheckResourceAttr("aws_instance.foo", "vpc_security_group_ids.#", "1"), + testAccCheckInstanceExists(resourceName, &before), + resource.TestCheckResourceAttr(resourceName, "vpc_security_group_ids.#", "1"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccInstanceConfigAddSecurityGroupAfter, Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &after), - resource.TestCheckResourceAttr("aws_instance.foo", "vpc_security_group_ids.#", "2"), + testAccCheckInstanceExists(resourceName, &after), + resource.TestCheckResourceAttr(resourceName, "vpc_security_group_ids.#", "2"), ), }, }, @@ -1459,7 +1633,7 @@ func TestAccAWSInstance_addSecurityGroupNetworkInterface(t *testing.T) { // https://github.com/terraform-providers/terraform-provider-aws/issues/227 func TestAccAWSInstance_associatePublic_defaultPrivate(t *testing.T) { 
var before ec2.Instance - resName := "aws_instance.foo" + resourceName := "aws_instance.test" rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ @@ -1470,11 +1644,16 @@ func TestAccAWSInstance_associatePublic_defaultPrivate(t *testing.T) { { Config: testAccInstanceConfig_associatePublic_defaultPrivate(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists(resName, &before), - resource.TestCheckResourceAttr(resName, "associate_public_ip_address", "false"), - resource.TestCheckResourceAttr(resName, "public_ip", ""), + testAccCheckInstanceExists(resourceName, &before), + resource.TestCheckResourceAttr(resourceName, "associate_public_ip_address", "false"), + resource.TestCheckResourceAttr(resourceName, "public_ip", ""), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -1482,7 +1661,7 @@ func TestAccAWSInstance_associatePublic_defaultPrivate(t *testing.T) { // https://github.com/terraform-providers/terraform-provider-aws/issues/227 func TestAccAWSInstance_associatePublic_defaultPublic(t *testing.T) { var before ec2.Instance - resName := "aws_instance.foo" + resourceName := "aws_instance.test" rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ @@ -1493,11 +1672,16 @@ func TestAccAWSInstance_associatePublic_defaultPublic(t *testing.T) { { Config: testAccInstanceConfig_associatePublic_defaultPublic(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists(resName, &before), - resource.TestCheckResourceAttr(resName, "associate_public_ip_address", "true"), - resource.TestCheckResourceAttrSet(resName, "public_ip"), + testAccCheckInstanceExists(resourceName, &before), + resource.TestCheckResourceAttr(resourceName, "associate_public_ip_address", "true"), + resource.TestCheckResourceAttrSet(resourceName, "public_ip"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -1505,7 +1689,7 @@ func TestAccAWSInstance_associatePublic_defaultPublic(t *testing.T) { // https://github.com/terraform-providers/terraform-provider-aws/issues/227 func TestAccAWSInstance_associatePublic_explicitPublic(t *testing.T) { var before ec2.Instance - resName := "aws_instance.foo" + resourceName := "aws_instance.test" rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ @@ -1516,11 +1700,16 @@ func TestAccAWSInstance_associatePublic_explicitPublic(t *testing.T) { { Config: testAccInstanceConfig_associatePublic_explicitPublic(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists(resName, &before), - resource.TestCheckResourceAttr(resName, "associate_public_ip_address", "true"), - resource.TestCheckResourceAttrSet(resName, "public_ip"), + testAccCheckInstanceExists(resourceName, &before), + resource.TestCheckResourceAttr(resourceName, "associate_public_ip_address", "true"), + resource.TestCheckResourceAttrSet(resourceName, "public_ip"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -1528,7 +1717,7 @@ func TestAccAWSInstance_associatePublic_explicitPublic(t *testing.T) { // https://github.com/terraform-providers/terraform-provider-aws/issues/227 func TestAccAWSInstance_associatePublic_explicitPrivate(t *testing.T) { var before ec2.Instance - resName := "aws_instance.foo" + resourceName := "aws_instance.test" rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ @@ -1539,11 +1728,16 @@ func 
TestAccAWSInstance_associatePublic_explicitPrivate(t *testing.T) { { Config: testAccInstanceConfig_associatePublic_explicitPrivate(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists(resName, &before), - resource.TestCheckResourceAttr(resName, "associate_public_ip_address", "false"), - resource.TestCheckResourceAttr(resName, "public_ip", ""), + testAccCheckInstanceExists(resourceName, &before), + resource.TestCheckResourceAttr(resourceName, "associate_public_ip_address", "false"), + resource.TestCheckResourceAttr(resourceName, "public_ip", ""), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -1551,7 +1745,7 @@ func TestAccAWSInstance_associatePublic_explicitPrivate(t *testing.T) { // https://github.com/terraform-providers/terraform-provider-aws/issues/227 func TestAccAWSInstance_associatePublic_overridePublic(t *testing.T) { var before ec2.Instance - resName := "aws_instance.foo" + resourceName := "aws_instance.test" rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ @@ -1562,11 +1756,16 @@ func TestAccAWSInstance_associatePublic_overridePublic(t *testing.T) { { Config: testAccInstanceConfig_associatePublic_overridePublic(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists(resName, &before), - resource.TestCheckResourceAttr(resName, "associate_public_ip_address", "true"), - resource.TestCheckResourceAttrSet(resName, "public_ip"), + testAccCheckInstanceExists(resourceName, &before), + resource.TestCheckResourceAttr(resourceName, "associate_public_ip_address", "true"), + resource.TestCheckResourceAttrSet(resourceName, "public_ip"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -1574,7 +1773,7 @@ func TestAccAWSInstance_associatePublic_overridePublic(t *testing.T) { // https://github.com/terraform-providers/terraform-provider-aws/issues/227 func TestAccAWSInstance_associatePublic_overridePrivate(t *testing.T) { var before ec2.Instance - resName := "aws_instance.foo" + resourceName := "aws_instance.test" rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ @@ -1585,18 +1784,23 @@ func TestAccAWSInstance_associatePublic_overridePrivate(t *testing.T) { { Config: testAccInstanceConfig_associatePublic_overridePrivate(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists(resName, &before), - resource.TestCheckResourceAttr(resName, "associate_public_ip_address", "false"), - resource.TestCheckResourceAttr(resName, "public_ip", ""), + testAccCheckInstanceExists(resourceName, &before), + resource.TestCheckResourceAttr(resourceName, "associate_public_ip_address", "false"), + resource.TestCheckResourceAttr(resourceName, "public_ip", ""), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } func TestAccAWSInstance_getPasswordData_falseToTrue(t *testing.T) { var before, after ec2.Instance - resName := "aws_instance.foo" + resourceName := "aws_instance.test" rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ @@ -1607,18 +1811,23 @@ func TestAccAWSInstance_getPasswordData_falseToTrue(t *testing.T) { { Config: testAccInstanceConfig_getPasswordData(false, rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists(resName, &before), - resource.TestCheckResourceAttr(resName, "get_password_data", "false"), - resource.TestCheckResourceAttr(resName, "password_data", ""), + testAccCheckInstanceExists(resourceName, 
&before), + resource.TestCheckResourceAttr(resourceName, "get_password_data", "false"), + resource.TestCheckResourceAttr(resourceName, "password_data", ""), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccInstanceConfig_getPasswordData(true, rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists(resName, &after), + testAccCheckInstanceExists(resourceName, &after), testAccCheckInstanceNotRecreated(t, &before, &after), - resource.TestCheckResourceAttr(resName, "get_password_data", "true"), - resource.TestCheckResourceAttrSet(resName, "password_data"), + resource.TestCheckResourceAttr(resourceName, "get_password_data", "true"), + resource.TestCheckResourceAttrSet(resourceName, "password_data"), ), }, }, @@ -1627,7 +1836,7 @@ func TestAccAWSInstance_getPasswordData_falseToTrue(t *testing.T) { func TestAccAWSInstance_getPasswordData_trueToFalse(t *testing.T) { var before, after ec2.Instance - resName := "aws_instance.foo" + resourceName := "aws_instance.test" rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ @@ -1638,18 +1847,24 @@ func TestAccAWSInstance_getPasswordData_trueToFalse(t *testing.T) { { Config: testAccInstanceConfig_getPasswordData(true, rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists(resName, &before), - resource.TestCheckResourceAttr(resName, "get_password_data", "true"), - resource.TestCheckResourceAttrSet(resName, "password_data"), + testAccCheckInstanceExists(resourceName, &before), + resource.TestCheckResourceAttr(resourceName, "get_password_data", "true"), + resource.TestCheckResourceAttrSet(resourceName, "password_data"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"password_data", "get_password_data"}, + }, { Config: testAccInstanceConfig_getPasswordData(false, rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists(resName, &after), + testAccCheckInstanceExists(resourceName, &after), testAccCheckInstanceNotRecreated(t, &before, &after), - resource.TestCheckResourceAttr(resName, "get_password_data", "false"), - resource.TestCheckResourceAttr(resName, "password_data", ""), + resource.TestCheckResourceAttr(resourceName, "get_password_data", "false"), + resource.TestCheckResourceAttr(resourceName, "password_data", ""), ), }, }, @@ -1672,6 +1887,12 @@ func TestAccAWSInstance_CreditSpecification_Empty_NonBurstable(t *testing.T) { testAccCheckInstanceExists(resourceName, &instance), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"credit_specification"}, + }, }, }) } @@ -1693,6 +1914,11 @@ func TestAccAWSInstance_CreditSpecification_UnspecifiedToEmpty_NonBurstable(t *t testAccCheckInstanceExists(resourceName, &instance), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccInstanceConfig_CreditSpecification_Empty_NonBurstable(rName), Check: resource.ComposeTestCheckFunc( @@ -1705,7 +1931,7 @@ func TestAccAWSInstance_CreditSpecification_UnspecifiedToEmpty_NonBurstable(t *t func TestAccAWSInstance_creditSpecification_unspecifiedDefaultsToStandard(t *testing.T) { var instance ec2.Instance - resName := "aws_instance.foo" + resourceName := "aws_instance.test" rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ @@ -1716,18 +1942,23 @@ func TestAccAWSInstance_creditSpecification_unspecifiedDefaultsToStandard(t *tes 
{ Config: testAccInstanceConfig_creditSpecification_unspecified(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists(resName, &instance), - resource.TestCheckResourceAttr(resName, "credit_specification.#", "1"), - resource.TestCheckResourceAttr(resName, "credit_specification.0.cpu_credits", "standard"), + testAccCheckInstanceExists(resourceName, &instance), + resource.TestCheckResourceAttr(resourceName, "credit_specification.#", "1"), + resource.TestCheckResourceAttr(resourceName, "credit_specification.0.cpu_credits", "standard"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } func TestAccAWSInstance_creditSpecification_standardCpuCredits(t *testing.T) { var first, second ec2.Instance - resName := "aws_instance.foo" + resourceName := "aws_instance.test" rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ @@ -1738,17 +1969,22 @@ func TestAccAWSInstance_creditSpecification_standardCpuCredits(t *testing.T) { { Config: testAccInstanceConfig_creditSpecification_standardCpuCredits(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists(resName, &first), - resource.TestCheckResourceAttr(resName, "credit_specification.#", "1"), - resource.TestCheckResourceAttr(resName, "credit_specification.0.cpu_credits", "standard"), + testAccCheckInstanceExists(resourceName, &first), + resource.TestCheckResourceAttr(resourceName, "credit_specification.#", "1"), + resource.TestCheckResourceAttr(resourceName, "credit_specification.0.cpu_credits", "standard"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccInstanceConfig_creditSpecification_unspecified(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists(resName, &second), - resource.TestCheckResourceAttr(resName, "credit_specification.#", "1"), - resource.TestCheckResourceAttr(resName, "credit_specification.0.cpu_credits", "standard"), + testAccCheckInstanceExists(resourceName, &second), + resource.TestCheckResourceAttr(resourceName, "credit_specification.#", "1"), + resource.TestCheckResourceAttr(resourceName, "credit_specification.0.cpu_credits", "standard"), ), }, }, @@ -1757,7 +1993,7 @@ func TestAccAWSInstance_creditSpecification_standardCpuCredits(t *testing.T) { func TestAccAWSInstance_creditSpecification_unlimitedCpuCredits(t *testing.T) { var first, second ec2.Instance - resName := "aws_instance.foo" + resourceName := "aws_instance.test" rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ @@ -1768,17 +2004,22 @@ func TestAccAWSInstance_creditSpecification_unlimitedCpuCredits(t *testing.T) { { Config: testAccInstanceConfig_creditSpecification_unlimitedCpuCredits(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists(resName, &first), - resource.TestCheckResourceAttr(resName, "credit_specification.#", "1"), - resource.TestCheckResourceAttr(resName, "credit_specification.0.cpu_credits", "unlimited"), + testAccCheckInstanceExists(resourceName, &first), + resource.TestCheckResourceAttr(resourceName, "credit_specification.#", "1"), + resource.TestCheckResourceAttr(resourceName, "credit_specification.0.cpu_credits", "unlimited"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccInstanceConfig_creditSpecification_unspecified(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists(resName, &second), - resource.TestCheckResourceAttr(resName, 
"credit_specification.#", "1"), - resource.TestCheckResourceAttr(resName, "credit_specification.0.cpu_credits", "unlimited"), + testAccCheckInstanceExists(resourceName, &second), + resource.TestCheckResourceAttr(resourceName, "credit_specification.#", "1"), + resource.TestCheckResourceAttr(resourceName, "credit_specification.0.cpu_credits", "unlimited"), ), }, }, @@ -1788,7 +2029,7 @@ func TestAccAWSInstance_creditSpecification_unlimitedCpuCredits(t *testing.T) { func TestAccAWSInstance_creditSpecification_unknownCpuCredits_t2(t *testing.T) { var instance ec2.Instance rInt := acctest.RandInt() - resName := "aws_instance.foo" + resourceName := "aws_instance.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -1798,11 +2039,16 @@ func TestAccAWSInstance_creditSpecification_unknownCpuCredits_t2(t *testing.T) { { Config: testAccInstanceConfig_creditSpecification_unknownCpuCredits(rInt, "t2.micro"), Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists(resName, &instance), - resource.TestCheckResourceAttr(resName, "credit_specification.#", "1"), - resource.TestCheckResourceAttr(resName, "credit_specification.0.cpu_credits", "standard"), + testAccCheckInstanceExists(resourceName, &instance), + resource.TestCheckResourceAttr(resourceName, "credit_specification.#", "1"), + resource.TestCheckResourceAttr(resourceName, "credit_specification.0.cpu_credits", "standard"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -1810,7 +2056,7 @@ func TestAccAWSInstance_creditSpecification_unknownCpuCredits_t2(t *testing.T) { func TestAccAWSInstance_creditSpecification_unknownCpuCredits_t3(t *testing.T) { var instance ec2.Instance rInt := acctest.RandInt() - resName := "aws_instance.foo" + resourceName := "aws_instance.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -1820,18 +2066,23 @@ func TestAccAWSInstance_creditSpecification_unknownCpuCredits_t3(t *testing.T) { { Config: testAccInstanceConfig_creditSpecification_unknownCpuCredits(rInt, "t3.micro"), Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists(resName, &instance), - resource.TestCheckResourceAttr(resName, "credit_specification.#", "1"), - resource.TestCheckResourceAttr(resName, "credit_specification.0.cpu_credits", "unlimited"), + testAccCheckInstanceExists(resourceName, &instance), + resource.TestCheckResourceAttr(resourceName, "credit_specification.#", "1"), + resource.TestCheckResourceAttr(resourceName, "credit_specification.0.cpu_credits", "unlimited"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } func TestAccAWSInstance_creditSpecification_updateCpuCredits(t *testing.T) { var first, second, third ec2.Instance - resName := "aws_instance.foo" + resourceName := "aws_instance.test" rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ @@ -1842,25 +2093,30 @@ func TestAccAWSInstance_creditSpecification_updateCpuCredits(t *testing.T) { { Config: testAccInstanceConfig_creditSpecification_standardCpuCredits(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists(resName, &first), - resource.TestCheckResourceAttr(resName, "credit_specification.#", "1"), - resource.TestCheckResourceAttr(resName, "credit_specification.0.cpu_credits", "standard"), + testAccCheckInstanceExists(resourceName, &first), + resource.TestCheckResourceAttr(resourceName, "credit_specification.#", "1"), + 
resource.TestCheckResourceAttr(resourceName, "credit_specification.0.cpu_credits", "standard"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccInstanceConfig_creditSpecification_unlimitedCpuCredits(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists(resName, &second), - resource.TestCheckResourceAttr(resName, "credit_specification.#", "1"), - resource.TestCheckResourceAttr(resName, "credit_specification.0.cpu_credits", "unlimited"), + testAccCheckInstanceExists(resourceName, &second), + resource.TestCheckResourceAttr(resourceName, "credit_specification.#", "1"), + resource.TestCheckResourceAttr(resourceName, "credit_specification.0.cpu_credits", "unlimited"), ), }, { Config: testAccInstanceConfig_creditSpecification_standardCpuCredits(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists(resName, &third), - resource.TestCheckResourceAttr(resName, "credit_specification.#", "1"), - resource.TestCheckResourceAttr(resName, "credit_specification.0.cpu_credits", "standard"), + testAccCheckInstanceExists(resourceName, &third), + resource.TestCheckResourceAttr(resourceName, "credit_specification.#", "1"), + resource.TestCheckResourceAttr(resourceName, "credit_specification.0.cpu_credits", "standard"), ), }, }, @@ -1869,7 +2125,7 @@ func TestAccAWSInstance_creditSpecification_updateCpuCredits(t *testing.T) { func TestAccAWSInstance_creditSpecification_isNotAppliedToNonBurstable(t *testing.T) { var instance ec2.Instance - resName := "aws_instance.foo" + resourceName := "aws_instance.test" rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ @@ -1880,16 +2136,22 @@ func TestAccAWSInstance_creditSpecification_isNotAppliedToNonBurstable(t *testin { Config: testAccInstanceConfig_creditSpecification_isNotAppliedToNonBurstable(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists(resName, &instance), + testAccCheckInstanceExists(resourceName, &instance), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"credit_specification"}, + }, }, }) } func TestAccAWSInstance_creditSpecificationT3_unspecifiedDefaultsToUnlimited(t *testing.T) { var instance ec2.Instance - resName := "aws_instance.foo" + resourceName := "aws_instance.test" rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ @@ -1900,18 +2162,23 @@ func TestAccAWSInstance_creditSpecificationT3_unspecifiedDefaultsToUnlimited(t * { Config: testAccInstanceConfig_creditSpecification_unspecified_t3(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists(resName, &instance), - resource.TestCheckResourceAttr(resName, "credit_specification.#", "1"), - resource.TestCheckResourceAttr(resName, "credit_specification.0.cpu_credits", "unlimited"), + testAccCheckInstanceExists(resourceName, &instance), + resource.TestCheckResourceAttr(resourceName, "credit_specification.#", "1"), + resource.TestCheckResourceAttr(resourceName, "credit_specification.0.cpu_credits", "unlimited"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } func TestAccAWSInstance_creditSpecificationT3_standardCpuCredits(t *testing.T) { var first, second ec2.Instance - resName := "aws_instance.foo" + resourceName := "aws_instance.test" rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ @@ -1922,17 +2189,22 @@ func TestAccAWSInstance_creditSpecificationT3_standardCpuCredits(t 
*testing.T) { { Config: testAccInstanceConfig_creditSpecification_standardCpuCredits_t3(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists(resName, &first), - resource.TestCheckResourceAttr(resName, "credit_specification.#", "1"), - resource.TestCheckResourceAttr(resName, "credit_specification.0.cpu_credits", "standard"), + testAccCheckInstanceExists(resourceName, &first), + resource.TestCheckResourceAttr(resourceName, "credit_specification.#", "1"), + resource.TestCheckResourceAttr(resourceName, "credit_specification.0.cpu_credits", "standard"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccInstanceConfig_creditSpecification_unspecified_t3(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists(resName, &second), - resource.TestCheckResourceAttr(resName, "credit_specification.#", "1"), - resource.TestCheckResourceAttr(resName, "credit_specification.0.cpu_credits", "standard"), + testAccCheckInstanceExists(resourceName, &second), + resource.TestCheckResourceAttr(resourceName, "credit_specification.#", "1"), + resource.TestCheckResourceAttr(resourceName, "credit_specification.0.cpu_credits", "standard"), ), }, }, @@ -1941,7 +2213,7 @@ func TestAccAWSInstance_creditSpecificationT3_standardCpuCredits(t *testing.T) { func TestAccAWSInstance_creditSpecificationT3_unlimitedCpuCredits(t *testing.T) { var first, second ec2.Instance - resName := "aws_instance.foo" + resourceName := "aws_instance.test" rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ @@ -1952,17 +2224,22 @@ func TestAccAWSInstance_creditSpecificationT3_unlimitedCpuCredits(t *testing.T) { Config: testAccInstanceConfig_creditSpecification_unlimitedCpuCredits_t3(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists(resName, &first), - resource.TestCheckResourceAttr(resName, "credit_specification.#", "1"), - resource.TestCheckResourceAttr(resName, "credit_specification.0.cpu_credits", "unlimited"), + testAccCheckInstanceExists(resourceName, &first), + resource.TestCheckResourceAttr(resourceName, "credit_specification.#", "1"), + resource.TestCheckResourceAttr(resourceName, "credit_specification.0.cpu_credits", "unlimited"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccInstanceConfig_creditSpecification_unspecified_t3(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists(resName, &second), - resource.TestCheckResourceAttr(resName, "credit_specification.#", "1"), - resource.TestCheckResourceAttr(resName, "credit_specification.0.cpu_credits", "unlimited"), + testAccCheckInstanceExists(resourceName, &second), + resource.TestCheckResourceAttr(resourceName, "credit_specification.#", "1"), + resource.TestCheckResourceAttr(resourceName, "credit_specification.0.cpu_credits", "unlimited"), ), }, }, @@ -1971,7 +2248,7 @@ func TestAccAWSInstance_creditSpecificationT3_unlimitedCpuCredits(t *testing.T) func TestAccAWSInstance_creditSpecificationT3_updateCpuCredits(t *testing.T) { var first, second, third ec2.Instance - resName := "aws_instance.foo" + resourceName := "aws_instance.test" rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ @@ -1982,25 +2259,30 @@ func TestAccAWSInstance_creditSpecificationT3_updateCpuCredits(t *testing.T) { { Config: testAccInstanceConfig_creditSpecification_standardCpuCredits_t3(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists(resName, &first), - 
resource.TestCheckResourceAttr(resName, "credit_specification.#", "1"), - resource.TestCheckResourceAttr(resName, "credit_specification.0.cpu_credits", "standard"), + testAccCheckInstanceExists(resourceName, &first), + resource.TestCheckResourceAttr(resourceName, "credit_specification.#", "1"), + resource.TestCheckResourceAttr(resourceName, "credit_specification.0.cpu_credits", "standard"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccInstanceConfig_creditSpecification_unlimitedCpuCredits_t3(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists(resName, &second), - resource.TestCheckResourceAttr(resName, "credit_specification.#", "1"), - resource.TestCheckResourceAttr(resName, "credit_specification.0.cpu_credits", "unlimited"), + testAccCheckInstanceExists(resourceName, &second), + resource.TestCheckResourceAttr(resourceName, "credit_specification.#", "1"), + resource.TestCheckResourceAttr(resourceName, "credit_specification.0.cpu_credits", "unlimited"), ), }, { Config: testAccInstanceConfig_creditSpecification_standardCpuCredits_t3(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists(resName, &third), - resource.TestCheckResourceAttr(resName, "credit_specification.#", "1"), - resource.TestCheckResourceAttr(resName, "credit_specification.0.cpu_credits", "standard"), + testAccCheckInstanceExists(resourceName, &third), + resource.TestCheckResourceAttr(resourceName, "credit_specification.#", "1"), + resource.TestCheckResourceAttr(resourceName, "credit_specification.0.cpu_credits", "standard"), ), }, }, @@ -2009,7 +2291,7 @@ func TestAccAWSInstance_creditSpecificationT3_updateCpuCredits(t *testing.T) { func TestAccAWSInstance_creditSpecification_standardCpuCredits_t2Tot3Taint(t *testing.T) { var before, after ec2.Instance - resName := "aws_instance.foo" + resourceName := "aws_instance.test" rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ @@ -2020,19 +2302,24 @@ func TestAccAWSInstance_creditSpecification_standardCpuCredits_t2Tot3Taint(t *te { Config: testAccInstanceConfig_creditSpecification_standardCpuCredits(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists(resName, &before), - resource.TestCheckResourceAttr(resName, "credit_specification.#", "1"), - resource.TestCheckResourceAttr(resName, "credit_specification.0.cpu_credits", "standard"), + testAccCheckInstanceExists(resourceName, &before), + resource.TestCheckResourceAttr(resourceName, "credit_specification.#", "1"), + resource.TestCheckResourceAttr(resourceName, "credit_specification.0.cpu_credits", "standard"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccInstanceConfig_creditSpecification_standardCpuCredits_t3(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists(resName, &after), - resource.TestCheckResourceAttr(resName, "credit_specification.#", "1"), - resource.TestCheckResourceAttr(resName, "credit_specification.0.cpu_credits", "standard"), + testAccCheckInstanceExists(resourceName, &after), + resource.TestCheckResourceAttr(resourceName, "credit_specification.#", "1"), + resource.TestCheckResourceAttr(resourceName, "credit_specification.0.cpu_credits", "standard"), ), - Taint: []string{resName}, + Taint: []string{resourceName}, }, }, }) @@ -2040,7 +2327,7 @@ func TestAccAWSInstance_creditSpecification_standardCpuCredits_t2Tot3Taint(t *te func 
TestAccAWSInstance_creditSpecification_unlimitedCpuCredits_t2Tot3Taint(t *testing.T) { var before, after ec2.Instance - resName := "aws_instance.foo" + resourceName := "aws_instance.test" rInt := acctest.RandInt() resource.ParallelTest(t, resource.TestCase{ @@ -2051,19 +2338,24 @@ func TestAccAWSInstance_creditSpecification_unlimitedCpuCredits_t2Tot3Taint(t *t { Config: testAccInstanceConfig_creditSpecification_unlimitedCpuCredits(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists(resName, &before), - resource.TestCheckResourceAttr(resName, "credit_specification.#", "1"), - resource.TestCheckResourceAttr(resName, "credit_specification.0.cpu_credits", "unlimited"), + testAccCheckInstanceExists(resourceName, &before), + resource.TestCheckResourceAttr(resourceName, "credit_specification.#", "1"), + resource.TestCheckResourceAttr(resourceName, "credit_specification.0.cpu_credits", "unlimited"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccInstanceConfig_creditSpecification_unlimitedCpuCredits_t3(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists(resName, &after), - resource.TestCheckResourceAttr(resName, "credit_specification.#", "1"), - resource.TestCheckResourceAttr(resName, "credit_specification.0.cpu_credits", "unlimited"), + testAccCheckInstanceExists(resourceName, &after), + resource.TestCheckResourceAttr(resourceName, "credit_specification.#", "1"), + resource.TestCheckResourceAttr(resourceName, "credit_specification.0.cpu_credits", "unlimited"), ), - Taint: []string{resName}, + Taint: []string{resourceName}, }, }, }) @@ -2072,6 +2364,7 @@ func TestAccAWSInstance_creditSpecification_unlimitedCpuCredits_t2Tot3Taint(t *t func TestAccAWSInstance_disappears(t *testing.T) { var conf ec2.Instance rInt := acctest.RandInt() + resourceName := "aws_instance.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -2081,7 +2374,7 @@ func TestAccAWSInstance_disappears(t *testing.T) { { Config: testAccInstanceConfig(rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckInstanceExists("aws_instance.foo", &conf), + testAccCheckInstanceExists(resourceName, &conf), testAccCheckInstanceDisappears(&conf), ), ExpectNonEmptyPlan: true, @@ -2106,6 +2399,12 @@ func TestAccAWSInstance_UserData_EmptyStringToUnspecified(t *testing.T) { testAccCheckInstanceExists(resourceName, &instance), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"user_data"}, + }, // Switching should show no difference { Config: testAccInstanceConfig_UserData_Unspecified(rInt), @@ -2132,6 +2431,11 @@ func TestAccAWSInstance_UserData_UnspecifiedToEmptyString(t *testing.T) { testAccCheckInstanceExists(resourceName, &instance), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, // Switching should show no difference { Config: testAccInstanceConfig_UserData_EmptyString(rInt), @@ -2347,7 +2651,7 @@ resource "aws_security_group" "sg" { vpc_id = "${data.aws_vpc.default.id}" } -resource "aws_instance" "foo" { +resource "aws_instance" "test" { ami = "${data.aws_ami.ubuntu.id}" instance_type = "t2.micro" security_groups = ["${aws_security_group.sg.name}"] @@ -2383,7 +2687,7 @@ resource "aws_security_group" "sg" { vpc_id = "${data.aws_vpc.default.id}" } -resource "aws_instance" "foo" { +resource "aws_instance" "test" { ami = "${data.aws_ami.ubuntu.id}" instance_type = "t2.micro" 
vpc_security_group_ids = ["${aws_security_group.sg.id}"] @@ -2418,7 +2722,7 @@ resource "aws_security_group" "sg" { description = "Test security group" } -resource "aws_instance" "foo" { +resource "aws_instance" "test" { ami = "${data.aws_ami.ubuntu.id}" instance_type = "m3.medium" security_groups = ["${aws_security_group.sg.name}"] @@ -2428,9 +2732,9 @@ resource "aws_instance" "foo" { func testAccInstanceConfig_pre(rInt int) string { return fmt.Sprintf(` -resource "aws_security_group" "tf_test_foo" { +resource "aws_security_group" "tf_test_test" { name = "tf_test_%d" - description = "foo" + description = "test" ingress { protocol = "icmp" @@ -2444,9 +2748,9 @@ resource "aws_security_group" "tf_test_foo" { func testAccInstanceConfig(rInt int) string { return fmt.Sprintf(` -resource "aws_security_group" "tf_test_foo" { +resource "aws_security_group" "tf_test_test" { name = "tf_test_%d" - description = "foo" + description = "test" ingress { protocol = "icmp" @@ -2456,13 +2760,13 @@ resource "aws_security_group" "tf_test_foo" { } } -resource "aws_instance" "foo" { +resource "aws_instance" "test" { # us-west-2 ami = "ami-4fccb37f" availability_zone = "us-west-2a" instance_type = "m1.small" - security_groups = ["${aws_security_group.tf_test_foo.name}"] + security_groups = ["${aws_security_group.tf_test_test.name}"] user_data = "foo:-with-character's" } `, rInt) @@ -2470,9 +2774,9 @@ resource "aws_instance" "foo" { func testAccInstanceConfigWithUserDataBase64(rInt int) string { return fmt.Sprintf(` -resource "aws_security_group" "tf_test_foo" { +resource "aws_security_group" "tf_test_test" { name = "tf_test_%d" - description = "foo" + description = "test" ingress { protocol = "icmp" @@ -2482,20 +2786,20 @@ resource "aws_security_group" "tf_test_foo" { } } -resource "aws_instance" "foo" { +resource "aws_instance" "test" { # us-west-2 ami = "ami-4fccb37f" availability_zone = "us-west-2a" instance_type = "m1.small" - security_groups = ["${aws_security_group.tf_test_foo.name}"] + security_groups = ["${aws_security_group.tf_test_test.name}"] user_data_base64 = "${base64encode("hello world")}" } `, rInt) } const testAccInstanceConfigWithSmallInstanceType = ` -resource "aws_instance" "foo" { +resource "aws_instance" "test" { # us-west-2 ami = "ami-55a7ea65" availability_zone = "us-west-2a" @@ -2509,7 +2813,7 @@ resource "aws_instance" "foo" { ` const testAccInstanceConfigUpdateInstanceType = ` -resource "aws_instance" "foo" { +resource "aws_instance" "test" { # us-west-2 ami = "ami-55a7ea65" availability_zone = "us-west-2a" @@ -2523,7 +2827,7 @@ resource "aws_instance" "foo" { ` const testAccInstanceGP2IopsDevice = ` -resource "aws_instance" "foo" { +resource "aws_instance" "test" { # us-west-2 ami = "ami-55a7ea65" @@ -2540,7 +2844,7 @@ resource "aws_instance" "foo" { ` const testAccInstanceGP2WithIopsValue = ` -resource "aws_instance" "foo" { +resource "aws_instance" "test" { # us-west-2 ami = "ami-55a7ea65" @@ -2559,7 +2863,7 @@ resource "aws_instance" "foo" { ` const testAccInstanceConfigBlockDevices = ` -resource "aws_instance" "foo" { +resource "aws_instance" "test" { # us-west-2 ami = "ami-55a7ea65" @@ -2600,57 +2904,57 @@ resource "aws_instance" "foo" { ` const testAccInstanceConfigSourceDestEnable = ` -resource "aws_vpc" "foo" { +resource "aws_vpc" "test" { cidr_block = "10.1.0.0/16" tags = { Name = "terraform-testacc-instance-source-dest-enable" } } -resource "aws_subnet" "foo" { +resource "aws_subnet" "test" { cidr_block = "10.1.1.0/24" - vpc_id = "${aws_vpc.foo.id}" + vpc_id = 
"${aws_vpc.test.id}" tags = { Name = "tf-acc-instance-source-dest-enable" } } -resource "aws_instance" "foo" { +resource "aws_instance" "test" { # us-west-2 ami = "ami-4fccb37f" instance_type = "m1.small" - subnet_id = "${aws_subnet.foo.id}" + subnet_id = "${aws_subnet.test.id}" } ` const testAccInstanceConfigSourceDestDisable = ` -resource "aws_vpc" "foo" { +resource "aws_vpc" "test" { cidr_block = "10.1.0.0/16" tags = { Name = "terraform-testacc-instance-source-dest-disable" } } -resource "aws_subnet" "foo" { +resource "aws_subnet" "test" { cidr_block = "10.1.1.0/24" - vpc_id = "${aws_vpc.foo.id}" + vpc_id = "${aws_vpc.test.id}" tags = { Name = "tf-acc-instance-source-dest-disable" } } -resource "aws_instance" "foo" { +resource "aws_instance" "test" { # us-west-2 ami = "ami-4fccb37f" instance_type = "m1.small" - subnet_id = "${aws_subnet.foo.id}" + subnet_id = "${aws_subnet.test.id}" source_dest_check = false } ` func testAccInstanceConfigDisableAPITermination(val bool) string { return fmt.Sprintf(` -resource "aws_vpc" "foo" { +resource "aws_vpc" "test" { cidr_block = "10.1.0.0/16" tags = { @@ -2658,46 +2962,46 @@ resource "aws_vpc" "foo" { } } -resource "aws_subnet" "foo" { +resource "aws_subnet" "test" { cidr_block = "10.1.1.0/24" - vpc_id = "${aws_vpc.foo.id}" + vpc_id = "${aws_vpc.test.id}" tags = { Name = "tf-acc-instance-disable-api-termination" } } -resource "aws_instance" "foo" { +resource "aws_instance" "test" { # us-west-2 ami = "ami-4fccb37f" instance_type = "m1.small" - subnet_id = "${aws_subnet.foo.id}" + subnet_id = "${aws_subnet.test.id}" disable_api_termination = %t } `, val) } const testAccInstanceConfigVPC = ` -resource "aws_vpc" "foo" { +resource "aws_vpc" "test" { cidr_block = "10.1.0.0/16" tags = { Name = "terraform-testacc-instance-vpc" } } -resource "aws_subnet" "foo" { +resource "aws_subnet" "test" { cidr_block = "10.1.1.0/24" - vpc_id = "${aws_vpc.foo.id}" + vpc_id = "${aws_vpc.test.id}" tags = { Name = "tf-acc-instance-vpc" } } -resource "aws_instance" "foo" { +resource "aws_instance" "test" { # us-west-2 ami = "ami-4fccb37f" instance_type = "m1.small" - subnet_id = "${aws_subnet.foo.id}" + subnet_id = "${aws_subnet.test.id}" associate_public_ip_address = true tenancy = "dedicated" # pre-encoded base64 data @@ -2707,7 +3011,7 @@ resource "aws_instance" "foo" { func testAccInstanceConfigPlacementGroup(rStr string) string { return fmt.Sprintf(` -resource "aws_vpc" "foo" { +resource "aws_vpc" "test" { cidr_block = "10.1.0.0/16" tags = { @@ -2715,28 +3019,28 @@ resource "aws_vpc" "foo" { } } -resource "aws_subnet" "foo" { +resource "aws_subnet" "test" { cidr_block = "10.1.1.0/24" - vpc_id = "${aws_vpc.foo.id}" + vpc_id = "${aws_vpc.test.id}" tags = { Name = "tf-acc-instance-placement-group" } } -resource "aws_placement_group" "foo" { +resource "aws_placement_group" "test" { name = "testAccInstanceConfigPlacementGroup_%s" strategy = "cluster" } # Limitations: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html#concepts-placement-groups -resource "aws_instance" "foo" { +resource "aws_instance" "test" { # us-west-2 ami = "ami-55a7ea65" instance_type = "c3.large" - subnet_id = "${aws_subnet.foo.id}" + subnet_id = "${aws_subnet.test.id}" associate_public_ip_address = true - placement_group = "${aws_placement_group.foo.name}" + placement_group = "${aws_placement_group.test.name}" # pre-encoded base64 data user_data = "3dc39dda39be1205215e776bad998da361a5955d" @@ -2745,7 +3049,7 @@ resource "aws_instance" "foo" { } const 
testAccInstanceConfigIpv6ErrorConfig = ` -resource "aws_vpc" "foo" { +resource "aws_vpc" "test" { cidr_block = "10.1.0.0/16" assign_generated_ipv6_cidr_block = true tags = { @@ -2753,20 +3057,20 @@ resource "aws_vpc" "foo" { } } -resource "aws_subnet" "foo" { +resource "aws_subnet" "test" { cidr_block = "10.1.1.0/24" - vpc_id = "${aws_vpc.foo.id}" - ipv6_cidr_block = "${cidrsubnet(aws_vpc.foo.ipv6_cidr_block, 8, 1)}" + vpc_id = "${aws_vpc.test.id}" + ipv6_cidr_block = "${cidrsubnet(aws_vpc.test.ipv6_cidr_block, 8, 1)}" tags = { Name = "tf-acc-instance-ipv6-err" } } -resource "aws_instance" "foo" { +resource "aws_instance" "test" { # us-west-2 ami = "ami-c5eabbf5" instance_type = "t2.micro" - subnet_id = "${aws_subnet.foo.id}" + subnet_id = "${aws_subnet.test.id}" ipv6_addresses = ["2600:1f14:bb2:e501::10"] ipv6_address_count = 1 tags = { @@ -2776,7 +3080,7 @@ resource "aws_instance" "foo" { ` const testAccInstanceConfigIpv6Support = ` -resource "aws_vpc" "foo" { +resource "aws_vpc" "test" { cidr_block = "10.1.0.0/16" assign_generated_ipv6_cidr_block = true tags = { @@ -2784,20 +3088,20 @@ resource "aws_vpc" "foo" { } } -resource "aws_subnet" "foo" { +resource "aws_subnet" "test" { cidr_block = "10.1.1.0/24" - vpc_id = "${aws_vpc.foo.id}" - ipv6_cidr_block = "${cidrsubnet(aws_vpc.foo.ipv6_cidr_block, 8, 1)}" + vpc_id = "${aws_vpc.test.id}" + ipv6_cidr_block = "${cidrsubnet(aws_vpc.test.ipv6_cidr_block, 8, 1)}" tags = { Name = "tf-acc-instance-ipv6-support" } } -resource "aws_instance" "foo" { +resource "aws_instance" "test" { # us-west-2 ami = "ami-c5eabbf5" instance_type = "t2.micro" - subnet_id = "${aws_subnet.foo.id}" + subnet_id = "${aws_subnet.test.id}" ipv6_address_count = 1 tags = { @@ -2807,7 +3111,7 @@ resource "aws_instance" "foo" { ` const testAccInstanceConfigIpv6SupportWithIpv4 = ` -resource "aws_vpc" "foo" { +resource "aws_vpc" "test" { cidr_block = "10.1.0.0/16" assign_generated_ipv6_cidr_block = true tags = { @@ -2815,20 +3119,20 @@ resource "aws_vpc" "foo" { } } -resource "aws_subnet" "foo" { +resource "aws_subnet" "test" { cidr_block = "10.1.1.0/24" - vpc_id = "${aws_vpc.foo.id}" - ipv6_cidr_block = "${cidrsubnet(aws_vpc.foo.ipv6_cidr_block, 8, 1)}" + vpc_id = "${aws_vpc.test.id}" + ipv6_cidr_block = "${cidrsubnet(aws_vpc.test.ipv6_cidr_block, 8, 1)}" tags = { Name = "tf-acc-instance-ipv6-support-with-ipv4" } } -resource "aws_instance" "foo" { +resource "aws_instance" "test" { # us-west-2 ami = "ami-c5eabbf5" instance_type = "t2.micro" - subnet_id = "${aws_subnet.foo.id}" + subnet_id = "${aws_subnet.test.id}" associate_public_ip_address = true ipv6_address_count = 1 @@ -2848,15 +3152,14 @@ provider "aws" { alias = "east" region = "us-east-1" } - -resource "aws_instance" "foo" { +resource "aws_instance" "test" { # us-west-2 provider = "aws.west" ami = "ami-4fccb37f" instance_type = "m1.small" } -resource "aws_instance" "bar" { +resource "aws_instance" "test2" { # us-east-1 provider = "aws.east" ami = "ami-8c6ea9e4" @@ -2865,21 +3168,21 @@ resource "aws_instance" "bar" { ` const testAccCheckInstanceConfigTags = ` -resource "aws_instance" "foo" { +resource "aws_instance" "test" { ami = "ami-4fccb37f" instance_type = "m1.small" tags = { - foo = "bar" + test = "test2" } } ` const testAccInstanceConfigEbsBlockDeviceKmsKeyArn = ` -resource "aws_kms_key" "foo" { +resource "aws_kms_key" "test" { deletion_window_in_days = 7 } -resource "aws_instance" "foo" { +resource "aws_instance" "test" { # us-west-2 ami = "ami-55a7ea65" @@ -2897,14 +3200,14 @@ resource "aws_instance" "foo" { 
ebs_block_device { device_name = "/dev/sdd" encrypted = true - kms_key_id = "${aws_kms_key.foo.arn}" + kms_key_id = "${aws_kms_key.test.arn}" volume_size = 12 } } ` const testAccInstanceConfigRootBlockDeviceKmsKeyArn = ` -resource "aws_vpc" "foo" { +resource "aws_vpc" "test" { cidr_block = "10.1.0.0/16" tags = { @@ -2912,9 +3215,9 @@ resource "aws_vpc" "foo" { } } -resource "aws_subnet" "foo" { +resource "aws_subnet" "test" { cidr_block = "10.1.1.0/24" - vpc_id = "${aws_vpc.foo.id}" + vpc_id = "${aws_vpc.test.id}" availability_zone = "us-west-2a" tags = { @@ -2922,19 +3225,19 @@ resource "aws_subnet" "foo" { } } -resource "aws_kms_key" "foo" { +resource "aws_kms_key" "test" { deletion_window_in_days = 7 } -resource "aws_instance" "foo" { +resource "aws_instance" "test" { ami = "ami-08692d171e3cf02d6" instance_type = "t3.nano" - subnet_id = "${aws_subnet.foo.id}" + subnet_id = "${aws_subnet.test.id}" root_block_device { delete_on_termination = true encrypted = true - kms_key_id = "${aws_kms_key.foo.arn}" + kms_key_id = "${aws_kms_key.test.arn}" } } ` @@ -2966,7 +3269,7 @@ data "aws_ami" "debian_jessie_latest" { owners = ["379101102735"] # Debian } -resource "aws_instance" "foo" { +resource "aws_instance" "test" { ami = "${data.aws_ami.debian_jessie_latest.id}" instance_type = "t2.medium" @@ -2982,7 +3285,7 @@ resource "aws_instance" "foo" { } resource "aws_ebs_volume" "test" { - availability_zone = "${aws_instance.foo.availability_zone}" + availability_zone = "${aws_instance.test.availability_zone}" size = "10" type = "gp2" @@ -2994,12 +3297,12 @@ resource "aws_ebs_volume" "test" { resource "aws_volume_attachment" "test" { device_name = "/dev/xvdg" volume_id = "${aws_ebs_volume.test.id}" - instance_id = "${aws_instance.foo.id}" + instance_id = "${aws_instance.test.id}" } ` const testAccCheckInstanceConfigNoVolumeTags = ` -resource "aws_instance" "foo" { +resource "aws_instance" "test" { ami = "ami-55a7ea65" instance_type = "m3.medium" @@ -3033,7 +3336,7 @@ resource "aws_instance" "foo" { ` const testAccCheckInstanceConfigWithVolumeTags = ` -resource "aws_instance" "foo" { +resource "aws_instance" "test" { ami = "ami-55a7ea65" instance_type = "m3.medium" @@ -3071,7 +3374,7 @@ resource "aws_instance" "foo" { ` const testAccCheckInstanceConfigWithVolumeTagsUpdate = ` -resource "aws_instance" "foo" { +resource "aws_instance" "test" { ami = "ami-55a7ea65" instance_type = "m3.medium" @@ -3110,11 +3413,11 @@ resource "aws_instance" "foo" { ` const testAccCheckInstanceConfigTagsUpdate = ` -resource "aws_instance" "foo" { +resource "aws_instance" "test" { ami = "ami-4fccb37f" instance_type = "m1.small" tags = { - bar = "baz" + test2 = "test3" } } ` @@ -3126,12 +3429,12 @@ resource "aws_iam_role" "test" { assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Service\":[\"ec2.amazonaws.com\"]},\"Action\":[\"sts:AssumeRole\"]}]}" } -resource "aws_instance" "foo" { +resource "aws_instance" "test" { ami = "ami-4fccb37f" instance_type = "m1.small" tags = { - bar = "baz" + test = "test3" } } `, rName) @@ -3149,62 +3452,62 @@ resource "aws_iam_instance_profile" "test" { roles = ["${aws_iam_role.test.name}"] } -resource "aws_instance" "foo" { +resource "aws_instance" "test" { ami = "ami-4fccb37f" instance_type = "m1.small" iam_instance_profile = "${aws_iam_instance_profile.test.name}" tags = { - bar = "baz" + test = "test3" } } `, rName, rName) } const testAccInstanceConfigPrivateIP = ` -resource "aws_vpc" "foo" { +resource "aws_vpc" "test" { cidr_block = 
"10.1.0.0/16" tags = { Name = "terraform-testacc-instance-private-ip" } } -resource "aws_subnet" "foo" { +resource "aws_subnet" "test" { cidr_block = "10.1.1.0/24" - vpc_id = "${aws_vpc.foo.id}" + vpc_id = "${aws_vpc.test.id}" tags = { Name = "tf-acc-instance-private-ip" } } -resource "aws_instance" "foo" { +resource "aws_instance" "test" { ami = "ami-c5eabbf5" instance_type = "t2.micro" - subnet_id = "${aws_subnet.foo.id}" + subnet_id = "${aws_subnet.test.id}" private_ip = "10.1.1.42" } ` const testAccInstanceConfigAssociatePublicIPAndPrivateIP = ` -resource "aws_vpc" "foo" { +resource "aws_vpc" "test" { cidr_block = "10.1.0.0/16" tags = { Name = "terraform-testacc-instance-public-ip-and-private-ip" } } -resource "aws_subnet" "foo" { +resource "aws_subnet" "test" { cidr_block = "10.1.1.0/24" - vpc_id = "${aws_vpc.foo.id}" + vpc_id = "${aws_vpc.test.id}" tags = { Name = "tf-acc-instance-public-ip-and-private-ip" } } -resource "aws_instance" "foo" { +resource "aws_instance" "test" { ami = "ami-c5eabbf5" instance_type = "t2.micro" - subnet_id = "${aws_subnet.foo.id}" + subnet_id = "${aws_subnet.test.id}" associate_public_ip_address = true private_ip = "10.1.1.42" } @@ -3213,10 +3516,10 @@ resource "aws_instance" "foo" { func testAccInstanceNetworkInstanceSecurityGroups(rInt int) string { return fmt.Sprintf(` resource "aws_internet_gateway" "gw" { - vpc_id = "${aws_vpc.foo.id}" + vpc_id = "${aws_vpc.test.id}" } -resource "aws_vpc" "foo" { +resource "aws_vpc" "test" { cidr_block = "10.1.0.0/16" tags = { @@ -3224,10 +3527,10 @@ resource "aws_vpc" "foo" { } } -resource "aws_security_group" "tf_test_foo" { +resource "aws_security_group" "test" { name = "tf_test_%d" - description = "foo" - vpc_id = "${aws_vpc.foo.id}" + description = "test" + vpc_id = "${aws_vpc.test.id}" ingress { protocol = "icmp" @@ -3237,26 +3540,26 @@ resource "aws_security_group" "tf_test_foo" { } } -resource "aws_subnet" "foo" { +resource "aws_subnet" "test" { cidr_block = "10.1.1.0/24" - vpc_id = "${aws_vpc.foo.id}" + vpc_id = "${aws_vpc.test.id}" tags = { Name = "tf-acc-instance-network-security-groups" } } -resource "aws_instance" "foo_instance" { +resource "aws_instance" "test" { ami = "ami-21f78e11" instance_type = "t1.micro" - vpc_security_group_ids = ["${aws_security_group.tf_test_foo.id}"] - subnet_id = "${aws_subnet.foo.id}" + vpc_security_group_ids = ["${aws_security_group.test.id}"] + subnet_id = "${aws_subnet.test.id}" associate_public_ip_address = true depends_on = ["aws_internet_gateway.gw"] } -resource "aws_eip" "foo_eip" { - instance = "${aws_instance.foo_instance.id}" +resource "aws_eip" "test_eip" { + instance = "${aws_instance.test.id}" vpc = true depends_on = ["aws_internet_gateway.gw"] } @@ -3266,10 +3569,10 @@ resource "aws_eip" "foo_eip" { func testAccInstanceNetworkInstanceVPCSecurityGroupIDs(rInt int) string { return fmt.Sprintf(` resource "aws_internet_gateway" "gw" { - vpc_id = "${aws_vpc.foo.id}" + vpc_id = "${aws_vpc.test.id}" } -resource "aws_vpc" "foo" { +resource "aws_vpc" "test" { cidr_block = "10.1.0.0/16" tags = { @@ -3277,10 +3580,10 @@ resource "aws_vpc" "foo" { } } -resource "aws_security_group" "tf_test_foo" { +resource "aws_security_group" "tf_test_test" { name = "tf_test_%d" - description = "foo" - vpc_id = "${aws_vpc.foo.id}" + description = "test" + vpc_id = "${aws_vpc.test.id}" ingress { protocol = "icmp" @@ -3290,25 +3593,25 @@ resource "aws_security_group" "tf_test_foo" { } } -resource "aws_subnet" "foo" { +resource "aws_subnet" "test" { cidr_block = "10.1.1.0/24" - vpc_id = 
"${aws_vpc.foo.id}" + vpc_id = "${aws_vpc.test.id}" tags = { Name = "tf-acc-instance-network-vpc-sg-ids" } } -resource "aws_instance" "foo_instance" { +resource "aws_instance" "test" { ami = "ami-21f78e11" instance_type = "t1.micro" - vpc_security_group_ids = ["${aws_security_group.tf_test_foo.id}"] - subnet_id = "${aws_subnet.foo.id}" + vpc_security_group_ids = ["${aws_security_group.tf_test_test.id}"] + subnet_id = "${aws_subnet.test.id}" depends_on = ["aws_internet_gateway.gw"] } -resource "aws_eip" "foo_eip" { - instance = "${aws_instance.foo_instance.id}" +resource "aws_eip" "test_eip" { + instance = "${aws_instance.test.id}" vpc = true depends_on = ["aws_internet_gateway.gw"] } @@ -3318,10 +3621,10 @@ resource "aws_eip" "foo_eip" { func testAccInstanceNetworkInstanceVPCRemoveSecurityGroupIDs(rInt int) string { return fmt.Sprintf(` resource "aws_internet_gateway" "gw" { - vpc_id = "${aws_vpc.foo.id}" + vpc_id = "${aws_vpc.test.id}" } -resource "aws_vpc" "foo" { +resource "aws_vpc" "test" { cidr_block = "10.1.0.0/16" tags = { @@ -3329,10 +3632,10 @@ resource "aws_vpc" "foo" { } } -resource "aws_security_group" "tf_test_foo" { +resource "aws_security_group" "tf_test_test" { name = "tf_test_%d" - description = "foo" - vpc_id = "${aws_vpc.foo.id}" + description = "test" + vpc_id = "${aws_vpc.test.id}" ingress { protocol = "icmp" @@ -3342,25 +3645,25 @@ resource "aws_security_group" "tf_test_foo" { } } -resource "aws_subnet" "foo" { +resource "aws_subnet" "test" { cidr_block = "10.1.1.0/24" - vpc_id = "${aws_vpc.foo.id}" + vpc_id = "${aws_vpc.test.id}" tags = { Name = "tf-acc-instance-network-vpc-sg-ids" } } -resource "aws_instance" "foo_instance" { +resource "aws_instance" "test" { ami = "ami-21f78e11" instance_type = "t1.micro" vpc_security_group_ids = [] - subnet_id = "${aws_subnet.foo.id}" + subnet_id = "${aws_subnet.test.id}" depends_on = ["aws_internet_gateway.gw"] } -resource "aws_eip" "foo_eip" { - instance = "${aws_instance.foo_instance.id}" +resource "aws_eip" "test_eip" { + instance = "${aws_instance.test.id}" vpc = true depends_on = ["aws_internet_gateway.gw"] } @@ -3377,12 +3680,11 @@ resource "aws_key_pair" "debugging" { key_name = "%s" public_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQD3F6tyPEFEzV0LX3X8BsXdMsQz1x2cEikKDEY0aIj41qgxMCP/iteneqXSIFZBp5vizPvaoIR3Um9xK7PGoW8giupGn+EPuxIA4cDM4vzOqOkiMPhz5XK0whEjkVzTo4+S0puvDZuwIsdiW9mxhJc7tgBNL0cYlWSYVkz4G/fslNfRPW5mYAM49f4fhtxPb5ok4Q2Lg9dPKVHO/Bgeu5woMc7RY0p1ej6D4CKFE6lymSDJpW0YHX/wqE9+cfEauh7xZcG0q9t2ta6F6fmX0agvpFyZo8aFbXeUBr7osSCJNgvavWbM/06niWrOvYX2xwWdhXmXSrbX8ZbabVohBK41 phodgson@thoughtworks.com" } - -resource "aws_instance" "foo" { +resource "aws_instance" "test" { ami = "ami-408c7f28" instance_type = "t1.micro" key_name = "${aws_key_pair.debugging.key_name}" - + tags = { Name = "testAccInstanceConfigKeyPair_TestAMI" } @@ -3391,26 +3693,26 @@ resource "aws_instance" "foo" { } const testAccInstanceConfigRootBlockDeviceMismatch = ` -resource "aws_vpc" "foo" { +resource "aws_vpc" "test" { cidr_block = "10.1.0.0/16" tags = { Name = "terraform-testacc-instance-root-block-device-mismatch" } } -resource "aws_subnet" "foo" { +resource "aws_subnet" "test" { cidr_block = "10.1.1.0/24" - vpc_id = "${aws_vpc.foo.id}" + vpc_id = "${aws_vpc.test.id}" tags = { Name = "tf-acc-instance-root-block-device-mismatch" } } -resource "aws_instance" "foo" { +resource "aws_instance" "test" { // This is an AMI with RootDeviceName: "/dev/sda1"; actual root: "/dev/sda" ami = "ami-ef5b69df" instance_type = "t1.micro" - subnet_id = 
"${aws_subnet.foo.id}" + subnet_id = "${aws_subnet.test.id}" root_block_device { volume_size = 13 } @@ -3418,61 +3720,61 @@ resource "aws_instance" "foo" { ` const testAccInstanceConfigForceNewAndTagsDrift = ` -resource "aws_vpc" "foo" { +resource "aws_vpc" "test" { cidr_block = "10.1.0.0/16" tags = { Name = "terraform-testacc-instance-force-new-and-tags-drift" } } -resource "aws_subnet" "foo" { +resource "aws_subnet" "test" { cidr_block = "10.1.1.0/24" - vpc_id = "${aws_vpc.foo.id}" + vpc_id = "${aws_vpc.test.id}" tags = { Name = "tf-acc-instance-force-new-and-tags-drift" } } -resource "aws_instance" "foo" { +resource "aws_instance" "test" { ami = "ami-22b9a343" instance_type = "t2.nano" - subnet_id = "${aws_subnet.foo.id}" + subnet_id = "${aws_subnet.test.id}" } ` const testAccInstanceConfigForceNewAndTagsDrift_Update = ` -resource "aws_vpc" "foo" { +resource "aws_vpc" "test" { cidr_block = "10.1.0.0/16" tags = { Name = "terraform-testacc-instance-force-new-and-tags-drift" } } -resource "aws_subnet" "foo" { +resource "aws_subnet" "test" { cidr_block = "10.1.1.0/24" - vpc_id = "${aws_vpc.foo.id}" + vpc_id = "${aws_vpc.test.id}" tags = { Name = "tf-acc-instance-force-new-and-tags-drift" } } -resource "aws_instance" "foo" { +resource "aws_instance" "test" { ami = "ami-22b9a343" instance_type = "t2.micro" - subnet_id = "${aws_subnet.foo.id}" + subnet_id = "${aws_subnet.test.id}" } ` const testAccInstanceConfigPrimaryNetworkInterface = ` -resource "aws_vpc" "foo" { +resource "aws_vpc" "test" { cidr_block = "172.16.0.0/16" tags = { Name = "terraform-testacc-instance-primary-network-iface" } } -resource "aws_subnet" "foo" { - vpc_id = "${aws_vpc.foo.id}" +resource "aws_subnet" "test" { + vpc_id = "${aws_vpc.test.id}" cidr_block = "172.16.10.0/24" availability_zone = "us-west-2a" tags = { @@ -3480,34 +3782,34 @@ resource "aws_subnet" "foo" { } } -resource "aws_network_interface" "bar" { - subnet_id = "${aws_subnet.foo.id}" +resource "aws_network_interface" "test" { + subnet_id = "${aws_subnet.test.id}" private_ips = ["172.16.10.100"] tags = { Name = "primary_network_interface" } } -resource "aws_instance" "foo" { +resource "aws_instance" "test" { ami = "ami-22b9a343" instance_type = "t2.micro" network_interface { - network_interface_id = "${aws_network_interface.bar.id}" + network_interface_id = "${aws_network_interface.test.id}" device_index = 0 } } ` const testAccInstanceConfigPrimaryNetworkInterfaceSourceDestCheck = ` -resource "aws_vpc" "foo" { +resource "aws_vpc" "test" { cidr_block = "172.16.0.0/16" tags = { Name = "terraform-testacc-instance-primary-network-iface-source-dest-check" } } -resource "aws_subnet" "foo" { - vpc_id = "${aws_vpc.foo.id}" +resource "aws_subnet" "test" { + vpc_id = "${aws_vpc.test.id}" cidr_block = "172.16.10.0/24" availability_zone = "us-west-2a" tags = { @@ -3515,8 +3817,8 @@ resource "aws_subnet" "foo" { } } -resource "aws_network_interface" "bar" { - subnet_id = "${aws_subnet.foo.id}" +resource "aws_network_interface" "test" { + subnet_id = "${aws_subnet.test.id}" private_ips = ["172.16.10.100"] source_dest_check = false tags = { @@ -3524,26 +3826,26 @@ resource "aws_network_interface" "bar" { } } -resource "aws_instance" "foo" { +resource "aws_instance" "test" { ami = "ami-22b9a343" instance_type = "t2.micro" network_interface { - network_interface_id = "${aws_network_interface.bar.id}" + network_interface_id = "${aws_network_interface.test.id}" device_index = 0 } } ` const testAccInstanceConfigAddSecondaryNetworkInterfaceBefore = ` -resource "aws_vpc" "foo" { 
+resource "aws_vpc" "test" { cidr_block = "172.16.0.0/16" tags = { Name = "terraform-testacc-instance-add-secondary-network-iface" } } -resource "aws_subnet" "foo" { - vpc_id = "${aws_vpc.foo.id}" +resource "aws_subnet" "test" { + vpc_id = "${aws_vpc.test.id}" cidr_block = "172.16.10.0/24" availability_zone = "us-west-2a" tags = { @@ -3552,7 +3854,7 @@ resource "aws_subnet" "foo" { } resource "aws_network_interface" "primary" { - subnet_id = "${aws_subnet.foo.id}" + subnet_id = "${aws_subnet.test.id}" private_ips = ["172.16.10.100"] tags = { Name = "primary_network_interface" @@ -3560,14 +3862,14 @@ resource "aws_network_interface" "primary" { } resource "aws_network_interface" "secondary" { - subnet_id = "${aws_subnet.foo.id}" + subnet_id = "${aws_subnet.test.id}" private_ips = ["172.16.10.101"] tags = { Name = "secondary_network_interface" } } -resource "aws_instance" "foo" { +resource "aws_instance" "test" { ami = "ami-22b9a343" instance_type = "t2.micro" network_interface { @@ -3578,15 +3880,15 @@ resource "aws_instance" "foo" { ` const testAccInstanceConfigAddSecondaryNetworkInterfaceAfter = ` -resource "aws_vpc" "foo" { +resource "aws_vpc" "test" { cidr_block = "172.16.0.0/16" tags = { Name = "terraform-testacc-instance-add-secondary-network-iface" } } -resource "aws_subnet" "foo" { - vpc_id = "${aws_vpc.foo.id}" +resource "aws_subnet" "test" { + vpc_id = "${aws_vpc.test.id}" cidr_block = "172.16.10.0/24" availability_zone = "us-west-2a" tags = { @@ -3595,7 +3897,7 @@ resource "aws_subnet" "foo" { } resource "aws_network_interface" "primary" { - subnet_id = "${aws_subnet.foo.id}" + subnet_id = "${aws_subnet.test.id}" private_ips = ["172.16.10.100"] tags = { Name = "primary_network_interface" @@ -3604,18 +3906,18 @@ resource "aws_network_interface" "primary" { // Attach previously created network interface, observe no state diff on instance resource resource "aws_network_interface" "secondary" { - subnet_id = "${aws_subnet.foo.id}" + subnet_id = "${aws_subnet.test.id}" private_ips = ["172.16.10.101"] tags = { Name = "secondary_network_interface" } attachment { - instance = "${aws_instance.foo.id}" + instance = "${aws_instance.test.id}" device_index = 1 } } -resource "aws_instance" "foo" { +resource "aws_instance" "test" { ami = "ami-22b9a343" instance_type = "t2.micro" network_interface { @@ -3626,132 +3928,132 @@ resource "aws_instance" "foo" { ` const testAccInstanceConfigAddSecurityGroupBefore = ` -resource "aws_vpc" "foo" { +resource "aws_vpc" "test" { cidr_block = "172.16.0.0/16" tags = { Name = "terraform-testacc-instance-add-security-group" } } -resource "aws_subnet" "foo" { - vpc_id = "${aws_vpc.foo.id}" +resource "aws_subnet" "test" { + vpc_id = "${aws_vpc.test.id}" cidr_block = "172.16.10.0/24" availability_zone = "us-west-2a" tags = { - Name = "tf-acc-instance-add-security-group-foo" + Name = "tf-acc-instance-add-security-group-test" } } -resource "aws_subnet" "bar" { - vpc_id = "${aws_vpc.foo.id}" +resource "aws_subnet" "test2" { + vpc_id = "${aws_vpc.test.id}" cidr_block = "172.16.11.0/24" availability_zone = "us-west-2a" tags = { - Name = "tf-acc-instance-add-security-group-bar" + Name = "tf-acc-instance-add-security-group-test2" } } -resource "aws_security_group" "foo" { - vpc_id = "${aws_vpc.foo.id}" - description = "foo" - name = "foo" +resource "aws_security_group" "test" { + vpc_id = "${aws_vpc.test.id}" + description = "test" + name = "test" } -resource "aws_security_group" "bar" { - vpc_id = "${aws_vpc.foo.id}" - description = "bar" - name = "bar" +resource 
"aws_security_group" "test2" { + vpc_id = "${aws_vpc.test.id}" + description = "test2" + name = "test2" } -resource "aws_instance" "foo" { +resource "aws_instance" "test" { ami = "ami-c5eabbf5" instance_type = "t2.micro" - subnet_id = "${aws_subnet.bar.id}" + subnet_id = "${aws_subnet.test.id}" associate_public_ip_address = false vpc_security_group_ids = [ - "${aws_security_group.foo.id}" + "${aws_security_group.test.id}" ] tags = { - Name = "foo-instance-sg-add-test" + Name = "test-instance-sg-add-test" } } -resource "aws_network_interface" "bar" { - subnet_id = "${aws_subnet.foo.id}" +resource "aws_network_interface" "test" { + subnet_id = "${aws_subnet.test.id}" private_ips = ["172.16.10.100"] - security_groups = ["${aws_security_group.foo.id}"] + security_groups = ["${aws_security_group.test.id}"] attachment { - instance = "${aws_instance.foo.id}" + instance = "${aws_instance.test.id}" device_index = 1 } tags = { - Name = "bar_interface" + Name = "test_interface" } } ` const testAccInstanceConfigAddSecurityGroupAfter = ` -resource "aws_vpc" "foo" { +resource "aws_vpc" "test" { cidr_block = "172.16.0.0/16" tags = { Name = "terraform-testacc-instance-add-security-group" } } -resource "aws_subnet" "foo" { - vpc_id = "${aws_vpc.foo.id}" +resource "aws_subnet" "test" { + vpc_id = "${aws_vpc.test.id}" cidr_block = "172.16.10.0/24" availability_zone = "us-west-2a" tags = { - Name = "tf-acc-instance-add-security-group-foo" + Name = "tf-acc-instance-add-security-group-test" } } -resource "aws_subnet" "bar" { - vpc_id = "${aws_vpc.foo.id}" +resource "aws_subnet" "test2" { + vpc_id = "${aws_vpc.test.id}" cidr_block = "172.16.11.0/24" availability_zone = "us-west-2a" tags = { - Name = "tf-acc-instance-add-security-group-bar" + Name = "tf-acc-instance-add-security-group-test" } } -resource "aws_security_group" "foo" { - vpc_id = "${aws_vpc.foo.id}" - description = "foo" - name = "foo" +resource "aws_security_group" "test" { + vpc_id = "${aws_vpc.test.id}" + description = "test" + name = "test" } -resource "aws_security_group" "bar" { - vpc_id = "${aws_vpc.foo.id}" - description = "bar" - name = "bar" +resource "aws_security_group" "test2" { + vpc_id = "${aws_vpc.test.id}" + description = "test2" + name = "test2" } -resource "aws_instance" "foo" { +resource "aws_instance" "test" { ami = "ami-c5eabbf5" instance_type = "t2.micro" - subnet_id = "${aws_subnet.bar.id}" + subnet_id = "${aws_subnet.test.id}" associate_public_ip_address = false vpc_security_group_ids = [ - "${aws_security_group.foo.id}", - "${aws_security_group.bar.id}" + "${aws_security_group.test.id}", + "${aws_security_group.test2.id}" ] tags = { - Name = "foo-instance-sg-add-test" + Name = "test-instance-sg-add-test" } } -resource "aws_network_interface" "bar" { - subnet_id = "${aws_subnet.foo.id}" +resource "aws_network_interface" "test" { + subnet_id = "${aws_subnet.test.id}" private_ips = ["172.16.10.100"] - security_groups = ["${aws_security_group.foo.id}"] + security_groups = ["${aws_security_group.test.id}"] attachment { - instance = "${aws_instance.foo.id}" + instance = "${aws_instance.test.id}" device_index = 1 } tags = { - Name = "bar_interface" + Name = "test_interface" } } ` @@ -3777,7 +4079,7 @@ resource "aws_subnet" "public_subnet" { } } -resource "aws_instance" "foo" { +resource "aws_instance" "test" { ami = "ami-22b9a343" # us-west-2 instance_type = "t2.micro" subnet_id = "${aws_subnet.public_subnet.id}" @@ -3810,7 +4112,7 @@ resource "aws_subnet" "public_subnet" { } } -resource "aws_instance" "foo" { +resource 
"aws_instance" "test" { ami = "ami-22b9a343" # us-west-2 instance_type = "t2.micro" subnet_id = "${aws_subnet.public_subnet.id}" @@ -3843,7 +4145,7 @@ resource "aws_subnet" "public_subnet" { } } -resource "aws_instance" "foo" { +resource "aws_instance" "test" { ami = "ami-22b9a343" # us-west-2 instance_type = "t2.micro" subnet_id = "${aws_subnet.public_subnet.id}" @@ -3877,7 +4179,7 @@ resource "aws_subnet" "public_subnet" { } } -resource "aws_instance" "foo" { +resource "aws_instance" "test" { ami = "ami-22b9a343" # us-west-2 instance_type = "t2.micro" subnet_id = "${aws_subnet.public_subnet.id}" @@ -3911,7 +4213,7 @@ resource "aws_subnet" "public_subnet" { } } -resource "aws_instance" "foo" { +resource "aws_instance" "test" { ami = "ami-22b9a343" # us-west-2 instance_type = "t2.micro" subnet_id = "${aws_subnet.public_subnet.id}" @@ -3945,7 +4247,7 @@ resource "aws_subnet" "public_subnet" { } } -resource "aws_instance" "foo" { +resource "aws_instance" "test" { ami = "ami-22b9a343" # us-west-2 instance_type = "t2.micro" subnet_id = "${aws_subnet.public_subnet.id}" @@ -3971,15 +4273,15 @@ data "aws_ami" "win2016core" { } } -resource "aws_key_pair" "foo" { +resource "aws_key_pair" "test" { key_name = "tf-acctest-%d" public_key = "ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAq6U3HQYC4g8WzU147gZZ7CKQH8TgYn3chZGRPxaGmHW1RUwsyEs0nmombmIhwxudhJ4ehjqXsDLoQpd6+c7BuLgTMvbv8LgE9LX53vnljFe1dsObsr/fYLvpU9LTlo8HgHAqO5ibNdrAUvV31ronzCZhms/Gyfdaue88Fd0/YnsZVGeOZPayRkdOHSpqme2CBrpa8myBeL1CWl0LkDG4+YCURjbaelfyZlIApLYKy3FcCan9XQFKaL32MJZwCgzfOvWIMtYcU8QtXMgnA3/I3gXk8YDUJv5P4lj0s/PJXuTM8DygVAUtebNwPuinS7wwonm5FXcWMuVGsVpG5K7FGQ== tf-acc-winpasswordtest" } -resource "aws_instance" "foo" { +resource "aws_instance" "test" { ami = "${data.aws_ami.win2016core.id}" instance_type = "t2.medium" - key_name = "${aws_key_pair.foo.key_name}" + key_name = "${aws_key_pair.test.key_name}" get_password_data = %t } @@ -4081,7 +4383,7 @@ resource "aws_subnet" "my_subnet" { availability_zone = "us-west-2a" } -resource "aws_instance" "foo" { +resource "aws_instance" "test" { ami = "ami-22b9a343" # us-west-2 instance_type = "t2.micro" subnet_id = "${aws_subnet.my_subnet.id}" @@ -4105,7 +4407,7 @@ resource "aws_subnet" "my_subnet" { availability_zone = "us-west-2a" } -resource "aws_instance" "foo" { +resource "aws_instance" "test" { ami = "ami-51537029" # us-west-2 instance_type = "t3.micro" subnet_id = "${aws_subnet.my_subnet.id}" @@ -4130,7 +4432,7 @@ resource "aws_subnet" "my_subnet" { availability_zone = "us-west-2a" } -resource "aws_instance" "foo" { +resource "aws_instance" "test" { ami = "ami-22b9a343" # us-west-2 instance_type = "t2.micro" subnet_id = "${aws_subnet.my_subnet.id}" @@ -4158,7 +4460,7 @@ resource "aws_subnet" "my_subnet" { availability_zone = "us-west-2a" } -resource "aws_instance" "foo" { +resource "aws_instance" "test" { ami = "ami-51537029" # us-west-2 instance_type = "t3.micro" subnet_id = "${aws_subnet.my_subnet.id}" @@ -4186,7 +4488,7 @@ resource "aws_subnet" "my_subnet" { availability_zone = "us-west-2a" } -resource "aws_instance" "foo" { +resource "aws_instance" "test" { ami = "ami-22b9a343" # us-west-2 instance_type = "t2.micro" subnet_id = "${aws_subnet.my_subnet.id}" @@ -4214,7 +4516,7 @@ resource "aws_subnet" "my_subnet" { availability_zone = "us-west-2a" } -resource "aws_instance" "foo" { +resource "aws_instance" "test" { ami = "ami-51537029" # us-west-2 instance_type = "t3.micro" subnet_id = "${aws_subnet.my_subnet.id}" @@ -4242,7 +4544,7 @@ resource "aws_subnet" "my_subnet" { availability_zone = 
"us-west-2a" } -resource "aws_instance" "foo" { +resource "aws_instance" "test" { ami = "ami-22b9a343" # us-west-2 instance_type = "m1.small" subnet_id = "${aws_subnet.my_subnet.id}" @@ -4270,7 +4572,7 @@ resource "aws_subnet" "my_subnet" { availability_zone = "us-west-2a" } -resource "aws_instance" "foo" { +resource "aws_instance" "test" { ami = "ami-51537029" # us-west-2 instance_type = %q subnet_id = "${aws_subnet.my_subnet.id}" From b3d53ffae69c562e179a70d5426ed44b892223d1 Mon Sep 17 00:00:00 2001 From: Ryn Daniels Date: Mon, 28 Oct 2019 12:16:47 +0100 Subject: [PATCH 33/55] import test lintignores for lightsail key pairs --- aws/resource_aws_lightsail_key_pair_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/aws/resource_aws_lightsail_key_pair_test.go b/aws/resource_aws_lightsail_key_pair_test.go index beb220e073c..42bf4c83d0c 100644 --- a/aws/resource_aws_lightsail_key_pair_test.go +++ b/aws/resource_aws_lightsail_key_pair_test.go @@ -36,6 +36,7 @@ func TestAccAWSLightsailKeyPair_basic(t *testing.T) { }) } +//lintignore: AT002 func TestAccAWSLightsailKeyPair_imported(t *testing.T) { var conf lightsail.KeyPair lightsailName := fmt.Sprintf("tf-test-lightsail-%d", acctest.RandInt()) From 9f1aa266a999406abdb671d7f43170d3ec19ccf5 Mon Sep 17 00:00:00 2001 From: Ryn Daniels Date: Mon, 28 Oct 2019 13:54:31 +0100 Subject: [PATCH 34/55] Import test refactor for SNS topic --- aws/resource_aws_sns_topic_test.go | 192 ++++++++++++++++------------- 1 file changed, 105 insertions(+), 87 deletions(-) diff --git a/aws/resource_aws_sns_topic_test.go b/aws/resource_aws_sns_topic_test.go index eb34401f36c..229b1125335 100644 --- a/aws/resource_aws_sns_topic_test.go +++ b/aws/resource_aws_sns_topic_test.go @@ -14,126 +14,128 @@ import ( awspolicy "github.com/jen20/awspolicyequivalence" ) -func TestAccAWSSNSTopic_importBasic(t *testing.T) { - resourceName := "aws_sns_topic.test_topic" - rName := acctest.RandString(10) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSSNSTopicDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSSNSTopicConfig_withName(rName), - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - func TestAccAWSSNSTopic_basic(t *testing.T) { attributes := make(map[string]string) + resourceName := "aws_sns_topic.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_sns_topic.test_topic", + IDRefreshName: resourceName, Providers: testAccProviders, CheckDestroy: testAccCheckAWSSNSTopicDestroy, Steps: []resource.TestStep{ { Config: testAccAWSSNSTopicConfig_withGeneratedName, Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSNSTopicExists("aws_sns_topic.test_topic", attributes), + testAccCheckAWSSNSTopicExists(resourceName, attributes), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } func TestAccAWSSNSTopic_name(t *testing.T) { attributes := make(map[string]string) - + resourceName := "aws_sns_topic.test" rName := acctest.RandString(10) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_sns_topic.test_topic", + IDRefreshName: resourceName, Providers: testAccProviders, CheckDestroy: testAccCheckAWSSNSTopicDestroy, Steps: []resource.TestStep{ { Config: testAccAWSSNSTopicConfig_withName(rName), Check: resource.ComposeTestCheckFunc( - 
testAccCheckAWSSNSTopicExists("aws_sns_topic.test_topic", attributes), + testAccCheckAWSSNSTopicExists(resourceName, attributes), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } func TestAccAWSSNSTopic_namePrefix(t *testing.T) { attributes := make(map[string]string) - + resourceName := "aws_sns_topic.test" startsWithPrefix := regexp.MustCompile("^terraform-test-topic-") resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_sns_topic.test_topic", + IDRefreshName: resourceName, Providers: testAccProviders, CheckDestroy: testAccCheckAWSSNSTopicDestroy, Steps: []resource.TestStep{ { Config: testAccAWSSNSTopicConfig_withNamePrefix(), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSNSTopicExists("aws_sns_topic.test_topic", attributes), - resource.TestMatchResourceAttr("aws_sns_topic.test_topic", "name", startsWithPrefix), + testAccCheckAWSSNSTopicExists(resourceName, attributes), + resource.TestMatchResourceAttr(resourceName, "name", startsWithPrefix), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name_prefix"}, + }, }, }) } func TestAccAWSSNSTopic_policy(t *testing.T) { attributes := make(map[string]string) - + resourceName := "aws_sns_topic.test" rName := acctest.RandString(10) expectedPolicy := `{"Statement":[{"Sid":"Stmt1445931846145","Effect":"Allow","Principal":{"AWS":"*"},"Action":"sns:Publish","Resource":"arn:aws:sns:us-west-2::example"}],"Version":"2012-10-17","Id":"Policy1445931846145"}` + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_sns_topic.test_topic", + IDRefreshName: resourceName, Providers: testAccProviders, CheckDestroy: testAccCheckAWSSNSTopicDestroy, Steps: []resource.TestStep{ { Config: testAccAWSSNSTopicWithPolicy(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSNSTopicExists("aws_sns_topic.test_topic", attributes), - testAccCheckAWSNSTopicHasPolicy("aws_sns_topic.test_topic", expectedPolicy), + testAccCheckAWSSNSTopicExists(resourceName, attributes), + testAccCheckAWSNSTopicHasPolicy(resourceName, expectedPolicy), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } func TestAccAWSSNSTopic_withIAMRole(t *testing.T) { attributes := make(map[string]string) - + resourceName := "aws_sns_topic.test" rName := acctest.RandString(10) + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_sns_topic.test_topic", + IDRefreshName: resourceName, Providers: testAccProviders, CheckDestroy: testAccCheckAWSSNSTopicDestroy, Steps: []resource.TestStep{ { Config: testAccAWSSNSTopicConfig_withIAMRole(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSNSTopicExists("aws_sns_topic.test_topic", attributes), + testAccCheckAWSSNSTopicExists(resourceName, attributes), ), }, }, @@ -144,7 +146,7 @@ func TestAccAWSSNSTopic_withFakeIAMRole(t *testing.T) { rName := acctest.RandString(10) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_sns_topic.test_topic", + IDRefreshName: "aws_sns_topic.test", Providers: testAccProviders, CheckDestroy: testAccCheckAWSSNSTopicDestroy, Steps: []resource.TestStep{ @@ -158,29 +160,35 @@ func TestAccAWSSNSTopic_withFakeIAMRole(t *testing.T) { func TestAccAWSSNSTopic_withDeliveryPolicy(t *testing.T) { attributes := make(map[string]string) 
- + resourceName := "aws_sns_topic.test" rName := acctest.RandString(10) expectedPolicy := `{"http":{"defaultHealthyRetryPolicy": {"minDelayTarget": 20,"maxDelayTarget": 20,"numMaxDelayRetries": 0,"numRetries": 3,"numNoDelayRetries": 0,"numMinDelayRetries": 0,"backoffFunction": "linear"},"disableSubscriptionOverrides": false}}` + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_sns_topic.test_topic", + IDRefreshName: resourceName, Providers: testAccProviders, CheckDestroy: testAccCheckAWSSNSTopicDestroy, Steps: []resource.TestStep{ { Config: testAccAWSSNSTopicConfig_withDeliveryPolicy(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSNSTopicExists("aws_sns_topic.test_topic", attributes), - testAccCheckAWSNSTopicHasDeliveryPolicy("aws_sns_topic.test_topic", expectedPolicy), + testAccCheckAWSSNSTopicExists(resourceName, attributes), + testAccCheckAWSNSTopicHasDeliveryPolicy(resourceName, expectedPolicy), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } func TestAccAWSSNSTopic_deliveryStatus(t *testing.T) { attributes := make(map[string]string) - + resourceName := "aws_sns_topic.test" rName := acctest.RandString(10) arnRegex := regexp.MustCompile("^arn:aws:iam::[0-9]{12}:role/sns-delivery-status-role-") expectedAttributes := map[string]*regexp.Regexp{ @@ -200,27 +208,27 @@ func TestAccAWSSNSTopic_deliveryStatus(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_sns_topic.test_topic", + IDRefreshName: resourceName, Providers: testAccProviders, CheckDestroy: testAccCheckAWSSNSTopicDestroy, Steps: []resource.TestStep{ { Config: testAccAWSSNSTopicConfig_deliveryStatus(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSNSTopicExists("aws_sns_topic.test_topic", attributes), + testAccCheckAWSSNSTopicExists(resourceName, attributes), testAccCheckAWSSNSTopicAttributes(attributes, expectedAttributes), - resource.TestMatchResourceAttr("aws_sns_topic.test_topic", "application_success_feedback_role_arn", arnRegex), - resource.TestCheckResourceAttr("aws_sns_topic.test_topic", "application_success_feedback_sample_rate", "100"), - resource.TestMatchResourceAttr("aws_sns_topic.test_topic", "application_failure_feedback_role_arn", arnRegex), - resource.TestMatchResourceAttr("aws_sns_topic.test_topic", "lambda_success_feedback_role_arn", arnRegex), - resource.TestCheckResourceAttr("aws_sns_topic.test_topic", "lambda_success_feedback_sample_rate", "90"), - resource.TestMatchResourceAttr("aws_sns_topic.test_topic", "lambda_failure_feedback_role_arn", arnRegex), - resource.TestMatchResourceAttr("aws_sns_topic.test_topic", "http_success_feedback_role_arn", arnRegex), - resource.TestCheckResourceAttr("aws_sns_topic.test_topic", "http_success_feedback_sample_rate", "80"), - resource.TestMatchResourceAttr("aws_sns_topic.test_topic", "http_failure_feedback_role_arn", arnRegex), - resource.TestMatchResourceAttr("aws_sns_topic.test_topic", "sqs_success_feedback_role_arn", arnRegex), - resource.TestCheckResourceAttr("aws_sns_topic.test_topic", "sqs_success_feedback_sample_rate", "70"), - resource.TestMatchResourceAttr("aws_sns_topic.test_topic", "sqs_failure_feedback_role_arn", arnRegex), + resource.TestMatchResourceAttr(resourceName, "application_success_feedback_role_arn", arnRegex), + resource.TestCheckResourceAttr(resourceName, "application_success_feedback_sample_rate", "100"), + 
resource.TestMatchResourceAttr(resourceName, "application_failure_feedback_role_arn", arnRegex), + resource.TestMatchResourceAttr(resourceName, "lambda_success_feedback_role_arn", arnRegex), + resource.TestCheckResourceAttr(resourceName, "lambda_success_feedback_sample_rate", "90"), + resource.TestMatchResourceAttr(resourceName, "lambda_failure_feedback_role_arn", arnRegex), + resource.TestMatchResourceAttr(resourceName, "http_success_feedback_role_arn", arnRegex), + resource.TestCheckResourceAttr(resourceName, "http_success_feedback_sample_rate", "80"), + resource.TestMatchResourceAttr(resourceName, "http_failure_feedback_role_arn", arnRegex), + resource.TestMatchResourceAttr(resourceName, "sqs_success_feedback_role_arn", arnRegex), + resource.TestCheckResourceAttr(resourceName, "sqs_success_feedback_sample_rate", "70"), + resource.TestMatchResourceAttr(resourceName, "sqs_failure_feedback_role_arn", arnRegex), ), }, }, @@ -229,27 +237,32 @@ func TestAccAWSSNSTopic_deliveryStatus(t *testing.T) { func TestAccAWSSNSTopic_encryption(t *testing.T) { attributes := make(map[string]string) - + resourceName := "aws_sns_topic.test" rName := acctest.RandString(10) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_sns_topic.test_topic", + IDRefreshName: resourceName, Providers: testAccProviders, CheckDestroy: testAccCheckAWSSNSTopicDestroy, Steps: []resource.TestStep{ { Config: testAccAWSSNSTopicConfig_withEncryption(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSNSTopicExists("aws_sns_topic.test_topic", attributes), - resource.TestCheckResourceAttr("aws_sns_topic.test_topic", "kms_master_key_id", "alias/aws/sns"), + testAccCheckAWSSNSTopicExists(resourceName, attributes), + resource.TestCheckResourceAttr(resourceName, "kms_master_key_id", "alias/aws/sns"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSSNSTopicConfig_withName(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSNSTopicExists("aws_sns_topic.test_topic", attributes), - resource.TestCheckResourceAttr("aws_sns_topic.test_topic", "kms_master_key_id", ""), + testAccCheckAWSSNSTopicExists(resourceName, attributes), + resource.TestCheckResourceAttr(resourceName, "kms_master_key_id", ""), ), }, }, @@ -258,38 +271,43 @@ func TestAccAWSSNSTopic_encryption(t *testing.T) { func TestAccAWSSNSTopic_tags(t *testing.T) { attributes := make(map[string]string) - + resourceName := "aws_sns_topic.test" rName := acctest.RandString(10) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, - IDRefreshName: "aws_sns_topic.test_topic", + IDRefreshName: resourceName, Providers: testAccProviders, CheckDestroy: testAccCheckAWSSNSTopicDestroy, Steps: []resource.TestStep{ { Config: testAccAWSSNSTopicConfigTags1(rName, "key1", "value1"), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSNSTopicExists("aws_sns_topic.test_topic", attributes), - resource.TestCheckResourceAttr("aws_sns_topic.test_topic", "tags.%", "1"), - resource.TestCheckResourceAttr("aws_sns_topic.test_topic", "tags.key1", "value1"), + testAccCheckAWSSNSTopicExists(resourceName, attributes), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSSNSTopicConfigTags2(rName, "key1", "value1updated", "key2", "value2"), Check: 
resource.ComposeTestCheckFunc( - testAccCheckAWSSNSTopicExists("aws_sns_topic.test_topic", attributes), - resource.TestCheckResourceAttr("aws_sns_topic.test_topic", "tags.%", "2"), - resource.TestCheckResourceAttr("aws_sns_topic.test_topic", "tags.key1", "value1updated"), - resource.TestCheckResourceAttr("aws_sns_topic.test_topic", "tags.key2", "value2"), + testAccCheckAWSSNSTopicExists(resourceName, attributes), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), ), }, { Config: testAccAWSSNSTopicConfigTags1(rName, "key2", "value2"), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSNSTopicExists("aws_sns_topic.test_topic", attributes), - resource.TestCheckResourceAttr("aws_sns_topic.test_topic", "tags.%", "1"), - resource.TestCheckResourceAttr("aws_sns_topic.test_topic", "tags.key2", "value2"), + testAccCheckAWSSNSTopicExists(resourceName, attributes), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), ), }, }, @@ -455,12 +473,12 @@ func testAccCheckAWSSNSTopicExists(n string, attributes map[string]string) resou } const testAccAWSSNSTopicConfig_withGeneratedName = ` -resource "aws_sns_topic" "test_topic" {} +resource "aws_sns_topic" "test" {} ` func testAccAWSSNSTopicConfig_withName(r string) string { return fmt.Sprintf(` -resource "aws_sns_topic" "test_topic" { +resource "aws_sns_topic" "test" { name = "terraform-test-topic-%s" } `, r) @@ -468,7 +486,7 @@ resource "aws_sns_topic" "test_topic" { func testAccAWSSNSTopicConfig_withNamePrefix() string { return ` -resource "aws_sns_topic" "test_topic" { +resource "aws_sns_topic" "test" { name_prefix = "terraform-test-topic-" } ` @@ -476,7 +494,7 @@ resource "aws_sns_topic" "test_topic" { func testAccAWSSNSTopicWithPolicy(r string) string { return fmt.Sprintf(` -resource "aws_sns_topic" "test_topic" { +resource "aws_sns_topic" "test" { name = "example-%s" policy = < Date: Mon, 28 Oct 2019 16:09:31 +0000 Subject: [PATCH 35/55] Update module golangci/golangci-lint to v1.21.0 --- go.mod | 2 +- go.sum | 12 +- vendor/github.com/bombsimon/wsl/wsl.go | 107 ++++-- vendor/github.com/gofrs/flock/.gitignore | 24 ++ vendor/github.com/gofrs/flock/.travis.yml | 10 + vendor/github.com/gofrs/flock/LICENSE | 27 ++ vendor/github.com/gofrs/flock/README.md | 41 +++ vendor/github.com/gofrs/flock/appveyor.yml | 25 ++ vendor/github.com/gofrs/flock/flock.go | 127 ++++++++ vendor/github.com/gofrs/flock/flock_unix.go | 195 +++++++++++ vendor/github.com/gofrs/flock/flock_winapi.go | 76 +++++ .../github.com/gofrs/flock/flock_windows.go | 140 ++++++++ .../golangci-lint/internal/cache/cache.go | 123 ++++--- .../golangci-lint/internal/cache/default.go | 4 +- .../golangci-lint/internal/cache/hash.go | 26 +- .../internal/pkgcache/pkgcache.go | 108 ++++-- .../golangci-lint/pkg/commands/cache.go | 86 +++++ .../golangci-lint/pkg/commands/executor.go | 100 ++++++ .../golangci-lint/pkg/commands/root.go | 1 + .../golangci-lint/pkg/commands/run.go | 29 +- .../golangci-lint/pkg/config/config.go | 16 +- .../golangci-lint/pkg/exitcodes/exitcodes.go | 2 + .../golangci-lint/pkg/fsutils/filecache.go | 8 +- .../golangci-lint/pkg/golinters/deadcode.go | 12 +- .../golangci-lint/pkg/golinters/depguard.go | 12 +- .../golangci-lint/pkg/golinters/dogsled.go | 12 +- .../golangci-lint/pkg/golinters/dupl.go | 12 +- 
.../golangci-lint/pkg/golinters/errcheck.go | 12 +- .../golangci-lint/pkg/golinters/funlen.go | 12 +- .../pkg/golinters/goanalysis/issue.go | 29 ++ .../pkg/golinters/goanalysis/linter.go | 307 ++++++++++++++++-- .../pkg/golinters/goanalysis/metalinter.go | 100 ++---- .../pkg/golinters/goanalysis/runner.go | 82 ++--- .../pkg/golinters/gochecknoglobals.go | 13 +- .../pkg/golinters/gochecknoinits.go | 13 +- .../golangci-lint/pkg/golinters/gocognit.go | 10 +- .../golangci-lint/pkg/golinters/goconst.go | 14 +- .../golangci-lint/pkg/golinters/gocritic.go | 12 +- .../golangci-lint/pkg/golinters/gocyclo.go | 12 +- .../golangci-lint/pkg/golinters/godox.go | 12 +- .../golangci-lint/pkg/golinters/gofmt.go | 13 +- .../golangci-lint/pkg/golinters/goimports.go | 13 +- .../golangci-lint/pkg/golinters/golint.go | 20 +- .../golangci-lint/pkg/golinters/gosec.go | 12 +- .../pkg/golinters/ineffassign.go | 12 +- .../golangci-lint/pkg/golinters/interfacer.go | 12 +- .../golangci-lint/pkg/golinters/lll.go | 12 +- .../golangci-lint/pkg/golinters/maligned.go | 12 +- .../golangci-lint/pkg/golinters/misspell.go | 12 +- .../golangci-lint/pkg/golinters/nakedret.go | 12 +- .../golangci-lint/pkg/golinters/prealloc.go | 12 +- .../golangci-lint/pkg/golinters/scopelint.go | 10 +- .../pkg/golinters/structcheck.go | 12 +- .../golangci-lint/pkg/golinters/typecheck.go | 2 +- .../golangci-lint/pkg/golinters/unconvert.go | 12 +- .../golangci-lint/pkg/golinters/unparam.go | 12 +- .../golangci-lint/pkg/golinters/unused.go | 18 +- .../golangci-lint/pkg/golinters/varcheck.go | 12 +- .../golangci-lint/pkg/golinters/whitespace.go | 10 +- .../golangci-lint/pkg/golinters/wsl.go | 20 +- .../pkg/lint/lintersdb/manager.go | 1 + .../golangci/golangci-lint/pkg/lint/runner.go | 7 +- .../golangci-lint/pkg/logutils/log.go | 10 +- .../golangci-lint/pkg/printers/checkstyle.go | 3 +- .../golangci-lint/pkg/printers/codeclimate.go | 3 +- .../golangci-lint/pkg/printers/junitxml.go | 3 +- .../golangci-lint/pkg/printers/tab.go | 5 +- .../golangci-lint/pkg/printers/text.go | 9 +- .../golangci-lint/pkg/result/issue.go | 18 +- .../pkg/result/processors/fixer.go | 24 +- .../pkg/result/processors/utils.go | 23 +- .../golangci-lint/pkg/timeutils/stopwatch.go | 42 ++- vendor/github.com/golangci/lint-1/lint.go | 27 +- .../go/analysis/passes/asmdecl/asmdecl.go | 5 - .../go/analysis/passes/findcall/findcall.go | 9 +- .../x/tools/go/gcexportdata/gcexportdata.go | 2 +- .../x/tools/go/internal/gcimporter/iexport.go | 9 +- .../x/tools/go/internal/gcimporter/iimport.go | 8 + vendor/golang.org/x/tools/go/ssa/func.go | 8 +- vendor/modules.txt | 10 +- 80 files changed, 1906 insertions(+), 535 deletions(-) create mode 100644 vendor/github.com/gofrs/flock/.gitignore create mode 100644 vendor/github.com/gofrs/flock/.travis.yml create mode 100644 vendor/github.com/gofrs/flock/LICENSE create mode 100644 vendor/github.com/gofrs/flock/README.md create mode 100644 vendor/github.com/gofrs/flock/appveyor.yml create mode 100644 vendor/github.com/gofrs/flock/flock.go create mode 100644 vendor/github.com/gofrs/flock/flock_unix.go create mode 100644 vendor/github.com/gofrs/flock/flock_winapi.go create mode 100644 vendor/github.com/gofrs/flock/flock_windows.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/commands/cache.go create mode 100644 vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/issue.go diff --git a/go.mod b/go.mod index fc50985d49d..1823675ebc8 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( 
github.com/beevik/etree v1.1.0 github.com/bflad/tfproviderlint v0.5.0 github.com/client9/misspell v0.3.4 - github.com/golangci/golangci-lint v1.20.0 + github.com/golangci/golangci-lint v1.21.0 github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf // indirect github.com/hashicorp/aws-sdk-go-base v0.4.0 github.com/hashicorp/go-cleanhttp v0.5.1 diff --git a/go.sum b/go.sum index 41ae3465780..d9348733bfd 100644 --- a/go.sum +++ b/go.sum @@ -48,8 +48,7 @@ github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1U github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bombsimon/wsl v1.2.1 h1:DcLf3V66dJi4a+KHt+F1FdOeBZ05adHqTMYFvjgv06k= -github.com/bombsimon/wsl v1.2.1/go.mod h1:43lEF/i0kpXbLCeDXL9LMT8c92HyBywXb0AsgMHYngM= +github.com/bombsimon/wsl v1.2.5/go.mod h1:43lEF/i0kpXbLCeDXL9LMT8c92HyBywXb0AsgMHYngM= github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8WK8raXaxBx6fRVTlJILwEwQGL1I/ByEI= github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/bsm/go-vlq v0.0.0-20150828105119-ec6e8d4f5f4e/go.mod h1:N+BjUcTjSxc2mtRGSCPsat1kze3CUtvJN3/jTXlp29k= @@ -117,6 +116,7 @@ github.com/go-toolsmith/typep v1.0.0 h1:zKymWyA1TRYvqYrYDrfEMZULyrhcnGY3x7LDKU2X github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/gofrs/flock v0.0.0-20190320160742-5135e617513b/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= @@ -152,12 +152,10 @@ github.com/golangci/gocyclo v0.0.0-20180528134321-2becd97e67ee h1:J2XAy40+7yz70u github.com/golangci/gocyclo v0.0.0-20180528134321-2becd97e67ee/go.mod h1:ozx7R9SIwqmqf5pRP90DhR2Oay2UIjGuKheCBCNwAYU= github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a h1:iR3fYXUjHCR97qWS8ch1y9zPNsgXThGwjKPrYfqMPks= github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= -github.com/golangci/golangci-lint v1.20.0 h1:bokjFHRgut7TT9YiKSVWVscWfZNs1fQgSz94geGPHJw= -github.com/golangci/golangci-lint v1.20.0/go.mod h1:WhcG5dpuf94TEpqUBk95qBEZ599UcAvuGf02g47CdXU= +github.com/golangci/golangci-lint v1.21.0/go.mod h1:phxpHK52q7SE+5KpPnti4oZTdFCEsn/tKN+nFvCKXfk= github.com/golangci/ineffassign v0.0.0-20190609212857-42439a7714cc h1:gLLhTLMk2/SutryVJ6D4VZCU3CUqr8YloG7FPIBWFpI= github.com/golangci/ineffassign v0.0.0-20190609212857-42439a7714cc/go.mod h1:e5tpTHCfVze+7EpLEozzMB3eafxo2KT5veNg1k6byQU= -github.com/golangci/lint-1 v0.0.0-20190930103755-fad67e08aa89 h1:664ewjIQUXDvinFMbAsoH2V2Yvaro/X8BoYpIMTWGXI= -github.com/golangci/lint-1 v0.0.0-20190930103755-fad67e08aa89/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= +github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod 
h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA= github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= github.com/golangci/misspell v0.0.0-20180809174111-950f5d19e770 h1:EL/O5HGrF7Jaq0yNhBLucz9hTuRzj2LdwGBOaENgxIk= @@ -567,6 +565,8 @@ golang.org/x/tools v0.0.0-20190823093517-aa644d2adf2a/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190930201159-7c411dea38b0 h1:7+F62GGWUowoiJOUDivedlBECd/fTeUDJnCu0JetQO0= golang.org/x/tools v0.0.0-20190930201159-7c411dea38b0/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191010075000-0337d82405ff h1:XdBG6es/oFDr1HwaxkxgVve7NB281QhxgK/i4voubFs= +golang.org/x/tools v0.0.0-20191010075000-0337d82405ff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= diff --git a/vendor/github.com/bombsimon/wsl/wsl.go b/vendor/github.com/bombsimon/wsl/wsl.go index 3dcc32b196d..fe62b62c602 100644 --- a/vendor/github.com/bombsimon/wsl/wsl.go +++ b/vendor/github.com/bombsimon/wsl/wsl.go @@ -7,6 +7,7 @@ import ( "go/token" "io/ioutil" "reflect" + "strings" ) type Configuration struct { @@ -49,6 +50,29 @@ type Configuration struct { // } AllowMultiLineAssignCuddle bool + // AllowCaseTrailingWhitespace will allow case blocks to end with a + // whitespace. Sometimes this might actually improve readability. This + // defaults to false but setting it to true will enable the following + // example: + // switch { + // case 1: + // fmt.Println(1) + // + // case 2: + // fmt.Println(2) + // + // case 3: + // fmt:println(3) + // } + AllowCaseTrailingWhitespace bool + + // AllowCuddleDeclaration will allow multiple var/declaration statements to + // be cuddled. This defaults to false but setting it to true will enable the + // following example: + // var foo bool + // var err error + AllowCuddleDeclaration bool + // AllowCuddleWithCalls is a list of call idents that everything can be // cuddled with. Defaults to calls looking like locks to support a flow like // this: @@ -69,11 +93,12 @@ type Configuration struct { // DefaultConfig returns default configuration func DefaultConfig() Configuration { return Configuration{ - StrictAppend: true, - AllowAssignAndCallCuddle: true, - AllowMultiLineAssignCuddle: true, - AllowCuddleWithCalls: []string{"Lock", "RLock"}, - AllowCuddleWithRHS: []string{"Unlock", "RUnlock"}, + StrictAppend: true, + AllowAssignAndCallCuddle: true, + AllowMultiLineAssignCuddle: true, + AllowCaseTrailingWhitespace: false, + AllowCuddleWithCalls: []string{"Lock", "RLock"}, + AllowCuddleWithRHS: []string{"Unlock", "RUnlock"}, } } @@ -113,6 +138,7 @@ func NewProcessor() *Processor { // ProcessFiles takes a string slice with file names (full paths) and lints // them. 
+// nolint: gocritic func (p *Processor) ProcessFiles(filenames []string) ([]Result, []string) { for _, filename := range filenames { data, err := ioutil.ReadFile(filename) @@ -147,7 +173,7 @@ func (p *Processor) process(filename string, data []byte) { for _, d := range p.file.Decls { switch v := d.(type) { case *ast.FuncDecl: - p.parseBlockBody(v.Body) + p.parseBlockBody(v.Name, v.Body) case *ast.GenDecl: // `go fmt` will handle proper spacing for GenDecl such as imports, // constants etc. @@ -159,14 +185,14 @@ func (p *Processor) process(filename string, data []byte) { // parseBlockBody will parse any kind of block statements such as switch cases // and if statements. A list of Result is returned. -func (p *Processor) parseBlockBody(block *ast.BlockStmt) { +func (p *Processor) parseBlockBody(ident *ast.Ident, block *ast.BlockStmt) { // Nothing to do if there's no value. if reflect.ValueOf(block).IsNil() { return } // Start by finding leading and trailing whitespaces. - p.findLeadingAndTrailingWhitespaces(block, nil) + p.findLeadingAndTrailingWhitespaces(ident, block, nil) // Parse the block body contents. p.parseBlockStatements(block.List) @@ -182,7 +208,7 @@ func (p *Processor) parseBlockStatements(statements []ast.Stmt) { if as, isAssignStmt := stmt.(*ast.AssignStmt); isAssignStmt { for _, rhs := range as.Rhs { if fl, isFuncLit := rhs.(*ast.FuncLit); isFuncLit { - p.parseBlockBody(fl.Body) + p.parseBlockBody(nil, fl.Body) } } } @@ -244,14 +270,6 @@ func (p *Processor) parseBlockStatements(statements []ast.Stmt) { calledOrAssignedOnLineAbove = append(calledOnLineAbove, assignedOnLineAbove...) ) - /* - DEBUG: - fmt.Println("LHS: ", leftHandSide) - fmt.Println("RHS: ", rightHandSide) - fmt.Println("Assigned above: ", assignedOnLineAbove) - fmt.Println("Assigned first: ", assignedFirstInBlock) - */ - // If we called some kind of lock on the line above we allow cuddling // anything. if atLeastOneInListsMatch(calledOnLineAbove, p.config.AllowCuddleWithCalls) { @@ -282,6 +300,7 @@ func (p *Processor) parseBlockStatements(statements []ast.Stmt) { // t.X = true // return t // } + // nolint: gocritic if i == len(statements)-1 && i == 1 { if p.nodeEnd(stmt)-p.nodeStart(previousStatement) <= 2 { return true @@ -351,7 +370,9 @@ func (p *Processor) parseBlockStatements(statements []ast.Stmt) { p.addError(t.Pos(), "assignments should only be cuddled with other assignments") case *ast.DeclStmt: - p.addError(t.Pos(), "declarations should never be cuddled") + if !p.config.AllowCuddleDeclaration { + p.addError(t.Pos(), "declarations should never be cuddled") + } case *ast.ExprStmt: switch previousStatement.(type) { case *ast.DeclStmt, *ast.ReturnStmt: @@ -394,6 +415,25 @@ func (p *Processor) parseBlockStatements(statements []ast.Stmt) { continue } + // Special treatment of deferring body closes after error checking + // according to best practices. See + // https://github.com/bombsimon/wsl/issues/31 which links to + // discussion about error handling after HTTP requests. This is hard + // coded and very specific but for now this is to be seen as a + // special case. 
What this does is that it *only* allows a defer + // statement with `Close` on the right hand side to be cuddled with + // an if-statement to support this: + // resp, err := client.Do(req) + // if err != nil { + // return err + // } + // defer resp.Body.Close() + if _, ok := previousStatement.(*ast.IfStmt); ok { + if atLeastOneInListsMatch(rightHandSide, []string{"Close"}) { + continue + } + } + if moreThanOneStatementAbove() { p.addError(t.Pos(), "only one cuddle assignment allowed before defer statement") @@ -517,7 +557,7 @@ func (p *Processor) firstBodyStatement(i int, allStmt []ast.Stmt) ast.Node { } } - p.parseBlockBody(statementBodyContent) + p.parseBlockBody(nil, statementBodyContent) case []ast.Stmt: // The Body field for an *ast.CaseClause or *ast.CommClause is of type // []ast.Stmt. We must check leading and trailing whitespaces and then @@ -530,7 +570,7 @@ func (p *Processor) firstBodyStatement(i int, allStmt []ast.Stmt) ast.Node { nextStatement = allStmt[i+1] } - p.findLeadingAndTrailingWhitespaces(stmt, nextStatement) + p.findLeadingAndTrailingWhitespaces(nil, stmt, nextStatement) p.parseBlockStatements(statementBodyContent) default: p.addWarning( @@ -664,6 +704,13 @@ func (p *Processor) findRHS(node ast.Node) []string { return p.findRHS(t.Call) case *ast.SendStmt: return p.findLHS(t.Value) + case *ast.IndexExpr: + rhs = append(rhs, p.findRHS(t.Index)...) + rhs = append(rhs, p.findRHS(t.X)...) + case *ast.SliceExpr: + rhs = append(rhs, p.findRHS(t.X)...) + rhs = append(rhs, p.findRHS(t.Low)...) + rhs = append(rhs, p.findRHS(t.High)...) default: if x, ok := maybeX(t); ok { return p.findRHS(x) @@ -679,7 +726,7 @@ func (p *Processor) findRHS(node ast.Node) []string { // if it exists. If the node doesn't have an X field nil and false is returned. // Known fields with X that are handled: // IndexExpr, ExprStmt, SelectorExpr, StarExpr, ParentExpr, TypeAssertExpr, -// RangeStmt, UnaryExpr, ParenExpr, SLiceExpr, IncDecStmt. +// RangeStmt, UnaryExpr, ParenExpr, SliceExpr, IncDecStmt. func maybeX(node interface{}) (ast.Node, bool) { maybeHasX := reflect.Indirect(reflect.ValueOf(node)).FieldByName("X") if !maybeHasX.IsValid() { @@ -726,7 +773,8 @@ func atLeastOneInListsMatch(listOne, listTwo []string) bool { // findLeadingAndTrailingWhitespaces will find leading and trailing whitespaces // in a node. The method takes comments in consideration which will make the // parser more gentle. -func (p *Processor) findLeadingAndTrailingWhitespaces(stmt, nextStatement ast.Node) { +// nolint: gocognit +func (p *Processor) findLeadingAndTrailingWhitespaces(ident *ast.Ident, stmt, nextStatement ast.Node) { var ( allowedLinesBeforeFirstStatement = 1 commentMap = ast.NewCommentMap(p.fileSet, stmt, p.file.Comments) @@ -811,11 +859,16 @@ func (p *Processor) findLeadingAndTrailingWhitespaces(stmt, nextStatement ast.No return } + // If we allow case to end white whitespace just return. + if p.config.AllowCaseTrailingWhitespace { + return + } + switch n := nextStatement.(type) { case *ast.CaseClause: - blockEndPos = n.Colon + blockEndPos = n.Case case *ast.CommClause: - blockEndPos = n.Colon + blockEndPos = n.Case default: // We're not at the end of the case? 
return @@ -824,7 +877,7 @@ func (p *Processor) findLeadingAndTrailingWhitespaces(stmt, nextStatement ast.No blockEndLine = p.fileSet.Position(blockEndPos).Line } - if p.nodeEnd(lastStatement) != blockEndLine-1 { + if p.nodeEnd(lastStatement) != blockEndLine-1 && !isExampleFunc(ident) { p.addError( blockEndPos, "block should not end with a whitespace (or comment)", @@ -832,6 +885,10 @@ func (p *Processor) findLeadingAndTrailingWhitespaces(stmt, nextStatement ast.No } } +func isExampleFunc(ident *ast.Ident) bool { + return ident != nil && strings.HasPrefix(ident.Name, "Example") +} + func (p *Processor) nodeStart(node ast.Node) int { return p.fileSet.Position(node.Pos()).Line } diff --git a/vendor/github.com/gofrs/flock/.gitignore b/vendor/github.com/gofrs/flock/.gitignore new file mode 100644 index 00000000000..daf913b1b34 --- /dev/null +++ b/vendor/github.com/gofrs/flock/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/gofrs/flock/.travis.yml b/vendor/github.com/gofrs/flock/.travis.yml new file mode 100644 index 00000000000..b791a74213c --- /dev/null +++ b/vendor/github.com/gofrs/flock/.travis.yml @@ -0,0 +1,10 @@ +language: go +go: + - 1.10.x + - 1.11.x +script: go test -v -check.vv -race ./... +sudo: false +notifications: + email: + on_success: never + on_failure: always diff --git a/vendor/github.com/gofrs/flock/LICENSE b/vendor/github.com/gofrs/flock/LICENSE new file mode 100644 index 00000000000..aff7d358e24 --- /dev/null +++ b/vendor/github.com/gofrs/flock/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2015, Tim Heckman +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of linode-netint nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/gofrs/flock/README.md b/vendor/github.com/gofrs/flock/README.md new file mode 100644 index 00000000000..71ce63692e8 --- /dev/null +++ b/vendor/github.com/gofrs/flock/README.md @@ -0,0 +1,41 @@ +# flock +[![TravisCI Build Status](https://img.shields.io/travis/gofrs/flock/master.svg?style=flat)](https://travis-ci.org/gofrs/flock) +[![GoDoc](https://img.shields.io/badge/godoc-flock-blue.svg?style=flat)](https://godoc.org/github.com/gofrs/flock) +[![License](https://img.shields.io/badge/license-BSD_3--Clause-brightgreen.svg?style=flat)](https://github.com/gofrs/flock/blob/master/LICENSE) +[![Go Report Card](https://goreportcard.com/badge/github.com/gofrs/flock)](https://goreportcard.com/report/github.com/gofrs/flock) + +`flock` implements a thread-safe sync.Locker interface for file locking. It also +includes a non-blocking TryLock() function to allow locking without blocking execution. + +## License +`flock` is released under the BSD 3-Clause License. See the `LICENSE` file for more details. + +## Go Compatibility +This package makes use of the `context` package that was introduced in Go 1.7. As such, this +package has an implicit dependency on Go 1.7+. + +## Installation +``` +go get -u github.com/gofrs/flock +``` + +## Usage +```Go +import "github.com/gofrs/flock" + +fileLock := flock.New("/var/lock/go-lock.lock") + +locked, err := fileLock.TryLock() + +if err != nil { + // handle locking error +} + +if locked { + // do work + fileLock.Unlock() +} +``` + +For more detailed usage information take a look at the package API docs on +[GoDoc](https://godoc.org/github.com/gofrs/flock). diff --git a/vendor/github.com/gofrs/flock/appveyor.yml b/vendor/github.com/gofrs/flock/appveyor.yml new file mode 100644 index 00000000000..6848e94bf88 --- /dev/null +++ b/vendor/github.com/gofrs/flock/appveyor.yml @@ -0,0 +1,25 @@ +version: '{build}' + +build: false +deploy: false + +clone_folder: 'c:\gopath\src\github.com\gofrs\flock' + +environment: + GOPATH: 'c:\gopath' + GOVERSION: '1.11' + +init: + - git config --global core.autocrlf input + +install: + - rmdir c:\go /s /q + - appveyor DownloadFile https://storage.googleapis.com/golang/go%GOVERSION%.windows-amd64.msi + - msiexec /i go%GOVERSION%.windows-amd64.msi /q + - set Path=c:\go\bin;c:\gopath\bin;%Path% + - go version + - go env + +test_script: + - go get -t ./... + - go test -race -v ./... diff --git a/vendor/github.com/gofrs/flock/flock.go b/vendor/github.com/gofrs/flock/flock.go new file mode 100644 index 00000000000..8f109b8a967 --- /dev/null +++ b/vendor/github.com/gofrs/flock/flock.go @@ -0,0 +1,127 @@ +// Copyright 2015 Tim Heckman. All rights reserved. +// Use of this source code is governed by the BSD 3-Clause +// license that can be found in the LICENSE file. + +// Package flock implements a thread-safe interface for file locking. +// It also includes a non-blocking TryLock() function to allow locking +// without blocking execution. +// +// Package flock is released under the BSD 3-Clause License. See the LICENSE file +// for more details. +// +// While using this library, remember that the locking behaviors are not +// guaranteed to be the same on each platform. For example, some UNIX-like +// operating systems will transparently convert a shared lock to an exclusive +// lock. If you Unlock() the flock from a location where you believe that you +// have the shared lock, you may accidentally drop the exclusive lock. 
+package flock + +import ( + "context" + "os" + "sync" + "time" +) + +// Flock is the struct type to handle file locking. All fields are unexported, +// with access to some of the fields provided by getter methods (Path() and Locked()). +type Flock struct { + path string + m sync.RWMutex + fh *os.File + l bool + r bool +} + +// New returns a new instance of *Flock. The only parameter +// it takes is the path to the desired lockfile. +func New(path string) *Flock { + return &Flock{path: path} +} + +// NewFlock returns a new instance of *Flock. The only parameter +// it takes is the path to the desired lockfile. +// +// Deprecated: Use New instead. +func NewFlock(path string) *Flock { + return New(path) +} + +// Close is equivalent to calling Unlock. +// +// This will release the lock and close the underlying file descriptor. +// It will not remove the file from disk, that's up to your application. +func (f *Flock) Close() error { + return f.Unlock() +} + +// Path returns the path as provided in NewFlock(). +func (f *Flock) Path() string { + return f.path +} + +// Locked returns the lock state (locked: true, unlocked: false). +// +// Warning: by the time you use the returned value, the state may have changed. +func (f *Flock) Locked() bool { + f.m.RLock() + defer f.m.RUnlock() + return f.l +} + +// RLocked returns the read lock state (locked: true, unlocked: false). +// +// Warning: by the time you use the returned value, the state may have changed. +func (f *Flock) RLocked() bool { + f.m.RLock() + defer f.m.RUnlock() + return f.r +} + +func (f *Flock) String() string { + return f.path +} + +// TryLockContext repeatedly tries to take an exclusive lock until one of the +// conditions is met: TryLock succeeds, TryLock fails with error, or Context +// Done channel is closed. +func (f *Flock) TryLockContext(ctx context.Context, retryDelay time.Duration) (bool, error) { + return tryCtx(ctx, f.TryLock, retryDelay) +} + +// TryRLockContext repeatedly tries to take a shared lock until one of the +// conditions is met: TryRLock succeeds, TryRLock fails with error, or Context +// Done channel is closed. +func (f *Flock) TryRLockContext(ctx context.Context, retryDelay time.Duration) (bool, error) { + return tryCtx(ctx, f.TryRLock, retryDelay) +} + +func tryCtx(ctx context.Context, fn func() (bool, error), retryDelay time.Duration) (bool, error) { + if ctx.Err() != nil { + return false, ctx.Err() + } + for { + if ok, err := fn(); ok || err != nil { + return ok, err + } + select { + case <-ctx.Done(): + return false, ctx.Err() + case <-time.After(retryDelay): + // try again + } + } +} + +func (f *Flock) setFh() error { + // open a new os.File instance + // create it if it doesn't exist, and open the file read-only. + fh, err := os.OpenFile(f.path, os.O_CREATE|os.O_RDONLY, os.FileMode(0600)) + if err != nil { + return err + } + + // set the filehandle on the struct + f.fh = fh + return nil +} diff --git a/vendor/github.com/gofrs/flock/flock_unix.go b/vendor/github.com/gofrs/flock/flock_unix.go new file mode 100644 index 00000000000..45f71a707c3 --- /dev/null +++ b/vendor/github.com/gofrs/flock/flock_unix.go @@ -0,0 +1,195 @@ +// Copyright 2015 Tim Heckman. All rights reserved. +// Use of this source code is governed by the BSD 3-Clause +// license that can be found in the LICENSE file. + +// +build !windows + +package flock + +import ( + "os" + "syscall" +) + +// Lock is a blocking call to try and take an exclusive file lock. It will wait +// until it is able to obtain the exclusive file lock. 
It's recommended that +// TryLock() be used over this function. This function may block the ability to +// query the current Locked() or RLocked() status due to a RW-mutex lock. +// +// If we are already exclusive-locked, this function short-circuits and returns +// immediately assuming it can take the mutex lock. +// +// If the *Flock has a shared lock (RLock), this may transparently replace the +// shared lock with an exclusive lock on some UNIX-like operating systems. Be +// careful when using exclusive locks in conjunction with shared locks +// (RLock()), because calling Unlock() may accidentally release the exclusive +// lock that was once a shared lock. +func (f *Flock) Lock() error { + return f.lock(&f.l, syscall.LOCK_EX) +} + +// RLock is a blocking call to try and take a shared file lock. It will wait +// until it is able to obtain the shared file lock. It's recommended that +// TryRLock() be used over this function. This function may block the ability to +// query the current Locked() or RLocked() status due to a RW-mutex lock. +// +// If we are already shared-locked, this function short-circuits and returns +// immediately assuming it can take the mutex lock. +func (f *Flock) RLock() error { + return f.lock(&f.r, syscall.LOCK_SH) +} + +func (f *Flock) lock(locked *bool, flag int) error { + f.m.Lock() + defer f.m.Unlock() + + if *locked { + return nil + } + + if f.fh == nil { + if err := f.setFh(); err != nil { + return err + } + } + + if err := syscall.Flock(int(f.fh.Fd()), flag); err != nil { + shouldRetry, reopenErr := f.reopenFDOnError(err) + if reopenErr != nil { + return reopenErr + } + + if !shouldRetry { + return err + } + + if err = syscall.Flock(int(f.fh.Fd()), flag); err != nil { + return err + } + } + + *locked = true + return nil +} + +// Unlock is a function to unlock the file. This file takes a RW-mutex lock, so +// while it is running the Locked() and RLocked() functions will be blocked. +// +// This function short-circuits if we are unlocked already. If not, it calls +// syscall.LOCK_UN on the file and closes the file descriptor. It does not +// remove the file from disk. It's up to your application to do. +// +// Please note, if your shared lock became an exclusive lock this may +// unintentionally drop the exclusive lock if called by the consumer that +// believes they have a shared lock. Please see Lock() for more details. +func (f *Flock) Unlock() error { + f.m.Lock() + defer f.m.Unlock() + + // if we aren't locked or if the lockfile instance is nil + // just return a nil error because we are unlocked + if (!f.l && !f.r) || f.fh == nil { + return nil + } + + // mark the file as unlocked + if err := syscall.Flock(int(f.fh.Fd()), syscall.LOCK_UN); err != nil { + return err + } + + f.fh.Close() + + f.l = false + f.r = false + f.fh = nil + + return nil +} + +// TryLock is the preferred function for taking an exclusive file lock. This +// function takes an RW-mutex lock before it tries to lock the file, so there is +// the possibility that this function may block for a short time if another +// goroutine is trying to take any action. +// +// The actual file lock is non-blocking. If we are unable to get the exclusive +// file lock, the function will return false instead of waiting for the lock. If +// we get the lock, we also set the *Flock instance as being exclusive-locked. +func (f *Flock) TryLock() (bool, error) { + return f.try(&f.l, syscall.LOCK_EX) +} + +// TryRLock is the preferred function for taking a shared file lock. 
This +// function takes an RW-mutex lock before it tries to lock the file, so there is +// the possibility that this function may block for a short time if another +// goroutine is trying to take any action. +// +// The actual file lock is non-blocking. If we are unable to get the shared file +// lock, the function will return false instead of waiting for the lock. If we +// get the lock, we also set the *Flock instance as being share-locked. +func (f *Flock) TryRLock() (bool, error) { + return f.try(&f.r, syscall.LOCK_SH) +} + +func (f *Flock) try(locked *bool, flag int) (bool, error) { + f.m.Lock() + defer f.m.Unlock() + + if *locked { + return true, nil + } + + if f.fh == nil { + if err := f.setFh(); err != nil { + return false, err + } + } + + var retried bool +retry: + err := syscall.Flock(int(f.fh.Fd()), flag|syscall.LOCK_NB) + + switch err { + case syscall.EWOULDBLOCK: + return false, nil + case nil: + *locked = true + return true, nil + } + if !retried { + if shouldRetry, reopenErr := f.reopenFDOnError(err); reopenErr != nil { + return false, reopenErr + } else if shouldRetry { + retried = true + goto retry + } + } + + return false, err +} + +// reopenFDOnError determines whether we should reopen the file handle +// in readwrite mode and try again. This comes from util-linux/sys-utils/flock.c: +// Since Linux 3.4 (commit 55725513) +// Probably NFSv4 where flock() is emulated by fcntl(). +func (f *Flock) reopenFDOnError(err error) (bool, error) { + if err != syscall.EIO && err != syscall.EBADF { + return false, nil + } + if st, err := f.fh.Stat(); err == nil { + // if the file is able to be read and written + if st.Mode()&0600 == 0600 { + f.fh.Close() + f.fh = nil + + // reopen in read-write mode and set the filehandle + fh, err := os.OpenFile(f.path, os.O_CREATE|os.O_RDWR, os.FileMode(0600)) + if err != nil { + return false, err + } + f.fh = fh + return true, nil + } + } + + return false, nil +} diff --git a/vendor/github.com/gofrs/flock/flock_winapi.go b/vendor/github.com/gofrs/flock/flock_winapi.go new file mode 100644 index 00000000000..fe405a255ae --- /dev/null +++ b/vendor/github.com/gofrs/flock/flock_winapi.go @@ -0,0 +1,76 @@ +// Copyright 2015 Tim Heckman. All rights reserved. +// Use of this source code is governed by the BSD 3-Clause +// license that can be found in the LICENSE file. + +// +build windows + +package flock + +import ( + "syscall" + "unsafe" +) + +var ( + kernel32, _ = syscall.LoadLibrary("kernel32.dll") + procLockFileEx, _ = syscall.GetProcAddress(kernel32, "LockFileEx") + procUnlockFileEx, _ = syscall.GetProcAddress(kernel32, "UnlockFileEx") +) + +const ( + winLockfileFailImmediately = 0x00000001 + winLockfileExclusiveLock = 0x00000002 + winLockfileSharedLock = 0x00000000 +) + +// Use of 0x00000000 for the shared lock is a guess based on some the MS Windows +// `LockFileEX` docs, which document the `LOCKFILE_EXCLUSIVE_LOCK` flag as: +// +// > The function requests an exclusive lock. Otherwise, it requests a shared +// > lock. 
+// +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx + +func lockFileEx(handle syscall.Handle, flags uint32, reserved uint32, numberOfBytesToLockLow uint32, numberOfBytesToLockHigh uint32, offset *syscall.Overlapped) (bool, syscall.Errno) { + r1, _, errNo := syscall.Syscall6( + uintptr(procLockFileEx), + 6, + uintptr(handle), + uintptr(flags), + uintptr(reserved), + uintptr(numberOfBytesToLockLow), + uintptr(numberOfBytesToLockHigh), + uintptr(unsafe.Pointer(offset))) + + if r1 != 1 { + if errNo == 0 { + return false, syscall.EINVAL + } + + return false, errNo + } + + return true, 0 +} + +func unlockFileEx(handle syscall.Handle, reserved uint32, numberOfBytesToLockLow uint32, numberOfBytesToLockHigh uint32, offset *syscall.Overlapped) (bool, syscall.Errno) { + r1, _, errNo := syscall.Syscall6( + uintptr(procUnlockFileEx), + 5, + uintptr(handle), + uintptr(reserved), + uintptr(numberOfBytesToLockLow), + uintptr(numberOfBytesToLockHigh), + uintptr(unsafe.Pointer(offset)), + 0) + + if r1 != 1 { + if errNo == 0 { + return false, syscall.EINVAL + } + + return false, errNo + } + + return true, 0 +} diff --git a/vendor/github.com/gofrs/flock/flock_windows.go b/vendor/github.com/gofrs/flock/flock_windows.go new file mode 100644 index 00000000000..9f4a5f10d24 --- /dev/null +++ b/vendor/github.com/gofrs/flock/flock_windows.go @@ -0,0 +1,140 @@ +// Copyright 2015 Tim Heckman. All rights reserved. +// Use of this source code is governed by the BSD 3-Clause +// license that can be found in the LICENSE file. + +package flock + +import ( + "syscall" +) + +// ErrorLockViolation is the error code returned from the Windows syscall when a +// lock would block and you ask to fail immediately. +const ErrorLockViolation syscall.Errno = 0x21 // 33 + +// Lock is a blocking call to try and take an exclusive file lock. It will wait +// until it is able to obtain the exclusive file lock. It's recommended that +// TryLock() be used over this function. This function may block the ability to +// query the current Locked() or RLocked() status due to a RW-mutex lock. +// +// If we are already locked, this function short-circuits and returns +// immediately assuming it can take the mutex lock. +func (f *Flock) Lock() error { + return f.lock(&f.l, winLockfileExclusiveLock) +} + +// RLock is a blocking call to try and take a shared file lock. It will wait +// until it is able to obtain the shared file lock. It's recommended that +// TryRLock() be used over this function. This function may block the ability to +// query the current Locked() or RLocked() status due to a RW-mutex lock. +// +// If we are already locked, this function short-circuits and returns +// immediately assuming it can take the mutex lock. +func (f *Flock) RLock() error { + return f.lock(&f.r, winLockfileSharedLock) +} + +func (f *Flock) lock(locked *bool, flag uint32) error { + f.m.Lock() + defer f.m.Unlock() + + if *locked { + return nil + } + + if f.fh == nil { + if err := f.setFh(); err != nil { + return err + } + } + + if _, errNo := lockFileEx(syscall.Handle(f.fh.Fd()), flag, 0, 1, 0, &syscall.Overlapped{}); errNo > 0 { + return errNo + } + + *locked = true + return nil +} + +// Unlock is a function to unlock the file. This file takes a RW-mutex lock, so +// while it is running the Locked() and RLocked() functions will be blocked. +// +// This function short-circuits if we are unlocked already. If not, it calls +// UnlockFileEx() on the file and closes the file descriptor. 
It does not remove +// the file from disk. It's up to your application to do. +func (f *Flock) Unlock() error { + f.m.Lock() + defer f.m.Unlock() + + // if we aren't locked or if the lockfile instance is nil + // just return a nil error because we are unlocked + if (!f.l && !f.r) || f.fh == nil { + return nil + } + + // mark the file as unlocked + if _, errNo := unlockFileEx(syscall.Handle(f.fh.Fd()), 0, 1, 0, &syscall.Overlapped{}); errNo > 0 { + return errNo + } + + f.fh.Close() + + f.l = false + f.r = false + f.fh = nil + + return nil +} + +// TryLock is the preferred function for taking an exclusive file lock. This +// function does take a RW-mutex lock before it tries to lock the file, so there +// is the possibility that this function may block for a short time if another +// goroutine is trying to take any action. +// +// The actual file lock is non-blocking. If we are unable to get the exclusive +// file lock, the function will return false instead of waiting for the lock. If +// we get the lock, we also set the *Flock instance as being exclusive-locked. +func (f *Flock) TryLock() (bool, error) { + return f.try(&f.l, winLockfileExclusiveLock) +} + +// TryRLock is the preferred function for taking a shared file lock. This +// function does take a RW-mutex lock before it tries to lock the file, so there +// is the possibility that this function may block for a short time if another +// goroutine is trying to take any action. +// +// The actual file lock is non-blocking. If we are unable to get the shared file +// lock, the function will return false instead of waiting for the lock. If we +// get the lock, we also set the *Flock instance as being shared-locked. +func (f *Flock) TryRLock() (bool, error) { + return f.try(&f.r, winLockfileSharedLock) +} + +func (f *Flock) try(locked *bool, flag uint32) (bool, error) { + f.m.Lock() + defer f.m.Unlock() + + if *locked { + return true, nil + } + + if f.fh == nil { + if err := f.setFh(); err != nil { + return false, err + } + } + + _, errNo := lockFileEx(syscall.Handle(f.fh.Fd()), flag|winLockfileFailImmediately, 0, 1, 0, &syscall.Overlapped{}) + + if errNo > 0 { + if errNo == ErrorLockViolation || errNo == syscall.ERROR_IO_PENDING { + return false, nil + } + + return false, errNo + } + + *locked = true + + return true, nil +} diff --git a/vendor/github.com/golangci/golangci-lint/internal/cache/cache.go b/vendor/github.com/golangci/golangci-lint/internal/cache/cache.go index 35d6155acaf..9ac140c50ea 100644 --- a/vendor/github.com/golangci/golangci-lint/internal/cache/cache.go +++ b/vendor/github.com/golangci/golangci-lint/internal/cache/cache.go @@ -12,7 +12,6 @@ import ( "bytes" "crypto/sha256" "encoding/hex" - "errors" "fmt" "io" "io/ioutil" @@ -22,6 +21,8 @@ import ( "strings" "time" + "github.com/pkg/errors" + "github.com/golangci/golangci-lint/internal/renameio" ) @@ -144,28 +145,35 @@ func (c *Cache) get(id ActionID) (Entry, error) { missing := func() (Entry, error) { return Entry{}, errMissing } - f, err := os.Open(c.fileName(id, "a")) + failed := func(err error) (Entry, error) { + return Entry{}, err + } + fileName := c.fileName(id, "a") + f, err := os.Open(fileName) if err != nil { - return missing() + if os.IsNotExist(err) { + return missing() + } + return failed(err) } defer f.Close() entry := make([]byte, entrySize+1) // +1 to detect whether f is too long - if n, err := io.ReadFull(f, entry); n != entrySize || err != io.ErrUnexpectedEOF { - return missing() + if n, readErr := io.ReadFull(f, entry); n != 
entrySize || readErr != io.ErrUnexpectedEOF { + return failed(fmt.Errorf("read %d/%d bytes from %s with error %s", n, entrySize, fileName, readErr)) } if entry[0] != 'v' || entry[1] != '1' || entry[2] != ' ' || entry[3+hexSize] != ' ' || entry[3+hexSize+1+hexSize] != ' ' || entry[3+hexSize+1+hexSize+1+20] != ' ' || entry[entrySize-1] != '\n' { - return missing() + return failed(fmt.Errorf("bad data in %s", fileName)) } eid, entry := entry[3:3+hexSize], entry[3+hexSize:] eout, entry := entry[1:1+hexSize], entry[1+hexSize:] esize, entry := entry[1:1+20], entry[1+20:] - etime, entry := entry[1:1+20], entry[1+20:] + etime := entry[1 : 1+20] var buf [HashSize]byte - if _, err := hex.Decode(buf[:], eid); err != nil || buf != id { - return missing() + if _, err = hex.Decode(buf[:], eid); err != nil || buf != id { + return failed(errors.Wrapf(err, "failed to hex decode eid data in %s", fileName)) } - if _, err := hex.Decode(buf[:], eout); err != nil { - return missing() + if _, err = hex.Decode(buf[:], eout); err != nil { + return failed(errors.Wrapf(err, "failed to hex decode eout data in %s", fileName)) } i := 0 for i < len(esize) && esize[i] == ' ' { @@ -173,7 +181,7 @@ func (c *Cache) get(id ActionID) (Entry, error) { } size, err := strconv.ParseInt(string(esize[i:]), 10, 64) if err != nil || size < 0 { - return missing() + return failed(fmt.Errorf("failed to parse esize int from %s with error %s", fileName, err)) } i = 0 for i < len(etime) && etime[i] == ' ' { @@ -181,10 +189,12 @@ func (c *Cache) get(id ActionID) (Entry, error) { } tm, err := strconv.ParseInt(string(etime[i:]), 10, 64) if err != nil || tm < 0 { - return missing() + return failed(fmt.Errorf("failed to parse etime int from %s with error %s", fileName, err)) } - c.used(c.fileName(id, "a")) + if err = c.used(fileName); err != nil { + return failed(errors.Wrapf(err, "failed to mark %s as used", fileName)) + } return Entry{buf, size, time.Unix(0, tm)}, nil } @@ -196,7 +206,12 @@ func (c *Cache) GetFile(id ActionID) (file string, entry Entry, err error) { if err != nil { return "", Entry{}, err } - file = c.OutputFile(entry.OutputID) + + file, err = c.OutputFile(entry.OutputID) + if err != nil { + return "", Entry{}, err + } + info, err := os.Stat(file) if err != nil || info.Size() != entry.Size { return "", Entry{}, errMissing @@ -212,7 +227,16 @@ func (c *Cache) GetBytes(id ActionID) ([]byte, Entry, error) { if err != nil { return nil, entry, err } - data, _ := ioutil.ReadFile(c.OutputFile(entry.OutputID)) + outputFile, err := c.OutputFile(entry.OutputID) + if err != nil { + return nil, entry, err + } + + data, err := ioutil.ReadFile(outputFile) + if err != nil { + return nil, entry, err + } + if sha256.Sum256(data) != entry.OutputID { return nil, entry, errMissing } @@ -220,10 +244,12 @@ func (c *Cache) GetBytes(id ActionID) ([]byte, Entry, error) { } // OutputFile returns the name of the cache file storing output with the given OutputID. -func (c *Cache) OutputFile(out OutputID) string { +func (c *Cache) OutputFile(out OutputID) (string, error) { file := c.fileName(out, "d") - c.used(file) - return file + if err := c.used(file); err != nil { + return "", err + } + return file, nil } // Time constants for cache expiration. @@ -253,12 +279,21 @@ const ( // mtime is more than an hour old. This heuristic eliminates // nearly all of the mtime updates that would otherwise happen, // while still keeping the mtimes useful for cache trimming. 
-func (c *Cache) used(file string) { +func (c *Cache) used(file string) error { info, err := os.Stat(file) - if err == nil && c.now().Sub(info.ModTime()) < mtimeInterval { - return + if err != nil { + return errors.Wrapf(err, "failed to stat file %s", file) } - os.Chtimes(file, c.now(), c.now()) + + if c.now().Sub(info.ModTime()) < mtimeInterval { + return nil + } + + if err := os.Chtimes(file, c.now(), c.now()); err != nil { + return errors.Wrapf(err, "failed to change time of file %s", file) + } + + return nil } // Trim removes old cache entries that are likely not to be reused. @@ -285,7 +320,7 @@ func (c *Cache) Trim() { // Ignore errors from here: if we don't write the complete timestamp, the // cache will appear older than it is, and we'll trim it again next time. - renameio.WriteFile(filepath.Join(c.dir, "trim.txt"), []byte(fmt.Sprintf("%d", now.Unix())), 0666) + _ = renameio.WriteFile(filepath.Join(c.dir, "trim.txt"), []byte(fmt.Sprintf("%d", now.Unix())), 0666) } // trimSubdir trims a single cache subdirectory. @@ -367,7 +402,9 @@ func (c *Cache) putIndexEntry(id ActionID, out OutputID, size int64, allowVerify os.Remove(file) return err } - os.Chtimes(file, c.now(), c.now()) // mainly for tests + if err = os.Chtimes(file, c.now(), c.now()); err != nil { // mainly for tests + return errors.Wrapf(err, "failed to change time of file %s", file) + } return nil } @@ -421,9 +458,12 @@ func (c *Cache) copyFile(file io.ReadSeeker, out OutputID, size int64) error { info, err := os.Stat(name) if err == nil && info.Size() == size { // Check hash. - if f, err := os.Open(name); err == nil { + if f, openErr := os.Open(name); openErr == nil { h := sha256.New() - io.Copy(h, f) + if _, copyErr := io.Copy(h, f); copyErr != nil { + return errors.Wrap(copyErr, "failed to copy to sha256") + } + f.Close() var out2 OutputID h.Sum(out2[:0]) @@ -456,44 +496,49 @@ func (c *Cache) copyFile(file io.ReadSeeker, out OutputID, size int64) error { // before returning, to avoid leaving bad bytes in the file. // Copy file to f, but also into h to double-check hash. - if _, err := file.Seek(0, 0); err != nil { - f.Truncate(0) + if _, err = file.Seek(0, 0); err != nil { + _ = f.Truncate(0) return err } h := sha256.New() w := io.MultiWriter(f, h) - if _, err := io.CopyN(w, file, size-1); err != nil { - f.Truncate(0) + if _, err = io.CopyN(w, file, size-1); err != nil { + _ = f.Truncate(0) return err } // Check last byte before writing it; writing it will make the size match // what other processes expect to find and might cause them to start // using the file. buf := make([]byte, 1) - if _, err := file.Read(buf); err != nil { - f.Truncate(0) + if _, err = file.Read(buf); err != nil { + _ = f.Truncate(0) return err } - h.Write(buf) + if n, wErr := h.Write(buf); n != len(buf) { + return fmt.Errorf("wrote to hash %d/%d bytes with error %s", n, len(buf), wErr) + } + sum := h.Sum(nil) if !bytes.Equal(sum, out[:]) { - f.Truncate(0) + _ = f.Truncate(0) return fmt.Errorf("file content changed underfoot") } // Commit cache file entry. - if _, err := f.Write(buf); err != nil { - f.Truncate(0) + if _, err = f.Write(buf); err != nil { + _ = f.Truncate(0) return err } - if err := f.Close(); err != nil { + if err = f.Close(); err != nil { // Data might not have been written, // but file may look like it is the right size. // To be extra careful, remove cached file. 
os.Remove(name) return err } - os.Chtimes(name, c.now(), c.now()) // mainly for tests + if err = os.Chtimes(name, c.now(), c.now()); err != nil { // mainly for tests + return errors.Wrapf(err, "failed to change time of file %s", name) + } return nil } diff --git a/vendor/github.com/golangci/golangci-lint/internal/cache/default.go b/vendor/github.com/golangci/golangci-lint/internal/cache/default.go index 162bcc972a8..e8866cb30cc 100644 --- a/vendor/github.com/golangci/golangci-lint/internal/cache/default.go +++ b/vendor/github.com/golangci/golangci-lint/internal/cache/default.go @@ -39,7 +39,9 @@ func initDefaultCache() { } if _, err := os.Stat(filepath.Join(dir, "README")); err != nil { // Best effort. - ioutil.WriteFile(filepath.Join(dir, "README"), []byte(cacheREADME), 0666) + if wErr := ioutil.WriteFile(filepath.Join(dir, "README"), []byte(cacheREADME), 0666); wErr != nil { + log.Fatalf("Failed to write README file to cache dir %s: %s", dir, err) + } } c, err := Open(dir) diff --git a/vendor/github.com/golangci/golangci-lint/internal/cache/hash.go b/vendor/github.com/golangci/golangci-lint/internal/cache/hash.go index a42f149c712..4ce79e325b2 100644 --- a/vendor/github.com/golangci/golangci-lint/internal/cache/hash.go +++ b/vendor/github.com/golangci/golangci-lint/internal/cache/hash.go @@ -42,11 +42,19 @@ func SetSalt(b []byte) { // Subkey returns an action ID corresponding to mixing a parent // action ID with a string description of the subkey. -func Subkey(parent ActionID, desc string) ActionID { +func Subkey(parent ActionID, desc string) (ActionID, error) { h := sha256.New() - h.Write([]byte("subkey:")) - h.Write(parent[:]) - h.Write([]byte(desc)) + const subkeyPrefix = "subkey:" + if n, err := h.Write([]byte(subkeyPrefix)); n != len(subkeyPrefix) { + return ActionID{}, fmt.Errorf("wrote %d/%d bytes of subkey prefix with error %s", n, len(subkeyPrefix), err) + } + if n, err := h.Write(parent[:]); n != len(parent) { + return ActionID{}, fmt.Errorf("wrote %d/%d bytes of parent with error %s", n, len(parent), err) + } + if n, err := h.Write([]byte(desc)); n != len(desc) { + return ActionID{}, fmt.Errorf("wrote %d/%d bytes of desc with error %s", n, len(desc), err) + } + var out ActionID h.Sum(out[:0]) if debugHash { @@ -57,21 +65,23 @@ func Subkey(parent ActionID, desc string) ActionID { hashDebug.m[out] = fmt.Sprintf("subkey %x %q", parent, desc) hashDebug.Unlock() } - return out + return out, nil } // NewHash returns a new Hash. // The caller is expected to Write data to it and then call Sum. -func NewHash(name string) *Hash { +func NewHash(name string) (*Hash, error) { h := &Hash{h: sha256.New(), name: name} if debugHash { fmt.Fprintf(os.Stderr, "HASH[%s]\n", h.name) } - h.Write(hashSalt) + if n, err := h.Write(hashSalt); n != len(hashSalt) { + return nil, fmt.Errorf("wrote %d/%d bytes of hash salt with error %s", n, len(hashSalt), err) + } if verify { h.buf = new(bytes.Buffer) } - return h + return h, nil } // Write writes data to the running hash. 
diff --git a/vendor/github.com/golangci/golangci-lint/internal/pkgcache/pkgcache.go b/vendor/github.com/golangci/golangci-lint/internal/pkgcache/pkgcache.go index 79da2808a2d..86007d0427d 100644 --- a/vendor/github.com/golangci/golangci-lint/internal/pkgcache/pkgcache.go +++ b/vendor/github.com/golangci/golangci-lint/internal/pkgcache/pkgcache.go @@ -17,6 +17,14 @@ import ( "github.com/golangci/golangci-lint/pkg/timeutils" ) +type HashMode int + +const ( + HashModeNeedOnlySelf HashMode = iota + HashModeNeedDirectDeps + HashModeNeedAllDeps +) + // Cache is a per-package data cache. A cached data is invalidated when // package or it's dependencies change. type Cache struct { @@ -46,7 +54,7 @@ func (c *Cache) Trim() { }) } -func (c *Cache) Put(pkg *packages.Package, key string, data interface{}) error { +func (c *Cache) Put(pkg *packages.Package, mode HashMode, key string, data interface{}) error { var err error buf := &bytes.Buffer{} c.sw.TrackStage("gob", func() { @@ -59,9 +67,13 @@ func (c *Cache) Put(pkg *packages.Package, key string, data interface{}) error { var aID cache.ActionID c.sw.TrackStage("key build", func() { - aID, err = c.pkgActionID(pkg) + aID, err = c.pkgActionID(pkg, mode) if err == nil { - aID = cache.Subkey(aID, key) + subkey, subkeyErr := cache.Subkey(aID, key) + if subkeyErr != nil { + err = errors.Wrap(subkeyErr, "failed to build subkey") + } + aID = subkey } }) if err != nil { @@ -81,13 +93,17 @@ func (c *Cache) Put(pkg *packages.Package, key string, data interface{}) error { var ErrMissing = errors.New("missing data") -func (c *Cache) Get(pkg *packages.Package, key string, data interface{}) error { +func (c *Cache) Get(pkg *packages.Package, mode HashMode, key string, data interface{}) error { var aID cache.ActionID var err error c.sw.TrackStage("key build", func() { - aID, err = c.pkgActionID(pkg) + aID, err = c.pkgActionID(pkg, mode) if err == nil { - aID = cache.Subkey(aID, key) + subkey, subkeyErr := cache.Subkey(aID, key) + if subkeyErr != nil { + err = errors.Wrap(subkeyErr, "failed to build subkey") + } + aID = subkey } }) if err != nil { @@ -117,13 +133,16 @@ func (c *Cache) Get(pkg *packages.Package, key string, data interface{}) error { return nil } -func (c *Cache) pkgActionID(pkg *packages.Package) (cache.ActionID, error) { - hash, err := c.packageHash(pkg) +func (c *Cache) pkgActionID(pkg *packages.Package, mode HashMode) (cache.ActionID, error) { + hash, err := c.packageHash(pkg, mode) if err != nil { return cache.ActionID{}, errors.Wrap(err, "failed to get package hash") } - key := cache.NewHash("action ID") + key, err := cache.NewHash("action ID") + if err != nil { + return cache.ActionID{}, errors.Wrap(err, "failed to make a hash") + } fmt.Fprintf(key, "pkgpath %s\n", pkg.PkgPath) fmt.Fprintf(key, "pkghash %s\n", hash) @@ -133,23 +152,36 @@ func (c *Cache) pkgActionID(pkg *packages.Package) (cache.ActionID, error) { // packageHash computes a package's hash. The hash is based on all Go // files that make up the package, as well as the hashes of imported // packages. 
-func (c *Cache) packageHash(pkg *packages.Package) (string, error) { - cachedHash, ok := c.pkgHashes.Load(pkg) +func (c *Cache) packageHash(pkg *packages.Package, mode HashMode) (string, error) { + type hashResults map[HashMode]string + hashResI, ok := c.pkgHashes.Load(pkg) if ok { - return cachedHash.(string), nil + hashRes := hashResI.(hashResults) + if _, ok := hashRes[mode]; !ok { + return "", fmt.Errorf("no mode %d in hash result", mode) + } + return hashRes[mode], nil + } + + hashRes := hashResults{} + + key, err := cache.NewHash("package hash") + if err != nil { + return "", errors.Wrap(err, "failed to make a hash") } - key := cache.NewHash("package hash") fmt.Fprintf(key, "pkgpath %s\n", pkg.PkgPath) for _, f := range pkg.CompiledGoFiles { c.ioSem <- struct{}{} - h, err := cache.FileHash(f) + h, fErr := cache.FileHash(f) <-c.ioSem - if err != nil { - return "", errors.Wrapf(err, "failed to calculate file %s hash", f) + if fErr != nil { + return "", errors.Wrapf(fErr, "failed to calculate file %s hash", f) } fmt.Fprintf(key, "file %s %x\n", f, h) } + curSum := key.Sum() + hashRes[HashModeNeedOnlySelf] = hex.EncodeToString(curSum[:]) imps := make([]*packages.Package, 0, len(pkg.Imports)) for _, imp := range pkg.Imports { @@ -158,20 +190,40 @@ func (c *Cache) packageHash(pkg *packages.Package) (string, error) { sort.Slice(imps, func(i, j int) bool { return imps[i].PkgPath < imps[j].PkgPath }) - for _, dep := range imps { - if dep.PkgPath == "unsafe" { - continue - } - depHash, err := c.packageHash(dep) - if err != nil { - return "", errors.Wrapf(err, "failed to calculate hash for dependency %s", dep.Name) + calcDepsHash := func(depMode HashMode) error { + for _, dep := range imps { + if dep.PkgPath == "unsafe" { + continue + } + + depHash, depErr := c.packageHash(dep, depMode) + if depErr != nil { + return errors.Wrapf(depErr, "failed to calculate hash for dependency %s with mode %d", dep.Name, depMode) + } + + fmt.Fprintf(key, "import %s %s\n", dep.PkgPath, depHash) } + return nil + } + + if err := calcDepsHash(HashModeNeedOnlySelf); err != nil { + return "", err + } + + curSum = key.Sum() + hashRes[HashModeNeedDirectDeps] = hex.EncodeToString(curSum[:]) - fmt.Fprintf(key, "import %s %s\n", dep.PkgPath, depHash) + if err := calcDepsHash(HashModeNeedAllDeps); err != nil { + return "", err } - h := key.Sum() - ret := hex.EncodeToString(h[:]) - c.pkgHashes.Store(pkg, ret) - return ret, nil + curSum = key.Sum() + hashRes[HashModeNeedAllDeps] = hex.EncodeToString(curSum[:]) + + if _, ok := hashRes[mode]; !ok { + return "", fmt.Errorf("invalid mode %d", mode) + } + + c.pkgHashes.Store(pkg, hashRes) + return hashRes[mode], nil } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/cache.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/cache.go new file mode 100644 index 00000000000..7fa04e85ed9 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/cache.go @@ -0,0 +1,86 @@ +package commands + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/golangci/golangci-lint/pkg/fsutils" + + "github.com/golangci/golangci-lint/internal/cache" + + "github.com/spf13/cobra" + + "github.com/golangci/golangci-lint/pkg/logutils" +) + +func (e *Executor) initCache() { + cacheCmd := &cobra.Command{ + Use: "cache", + Short: "Cache control and information", + Run: func(cmd *cobra.Command, args []string) { + if len(args) != 0 { + e.log.Fatalf("Usage: golangci-lint cache") + } + if err := cmd.Help(); err != nil { + e.log.Fatalf("Can't run 
cache: %s", err) + } + }, + } + e.rootCmd.AddCommand(cacheCmd) + + cacheCmd.AddCommand(&cobra.Command{ + Use: "clean", + Short: "Clean cache", + Run: e.executeCleanCache, + }) + cacheCmd.AddCommand(&cobra.Command{ + Use: "status", + Short: "Show cache status", + Run: e.executeCacheStatus, + }) + + // TODO: add trim command? +} + +func (e *Executor) executeCleanCache(_ *cobra.Command, args []string) { + if len(args) != 0 { + e.log.Fatalf("Usage: golangci-lint cache clean") + } + + cacheDir := cache.DefaultDir() + if err := os.RemoveAll(cacheDir); err != nil { + e.log.Fatalf("Failed to remove dir %s: %s", cacheDir, err) + } + + os.Exit(0) +} + +func (e *Executor) executeCacheStatus(_ *cobra.Command, args []string) { + if len(args) != 0 { + e.log.Fatalf("Usage: golangci-lint cache status") + } + + cacheDir := cache.DefaultDir() + fmt.Fprintf(logutils.StdOut, "Dir: %s\n", cacheDir) + cacheSizeBytes, err := dirSizeBytes(cacheDir) + if err == nil { + fmt.Fprintf(logutils.StdOut, "Size: %s\n", fsutils.PrettifyBytesCount(cacheSizeBytes)) + } + + os.Exit(0) +} + +func dirSizeBytes(path string) (int64, error) { + var size int64 + err := filepath.Walk(path, func(_ string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !info.IsDir() { + size += info.Size() + } + return err + }) + return size, err +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/executor.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/executor.go index 62429e726b5..975a3c50758 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/commands/executor.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/executor.go @@ -1,10 +1,25 @@ package commands import ( + "bytes" + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "time" + + "github.com/golangci/golangci-lint/internal/cache" + "github.com/fatih/color" + "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/spf13/pflag" + "github.com/gofrs/flock" + "github.com/golangci/golangci-lint/internal/pkgcache" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/fsutils" @@ -38,6 +53,7 @@ type Executor struct { sw *timeutils.Stopwatch loadGuard *load.Guard + flock *flock.Flock } func NewExecutor(version, commit, date string) *Executor { @@ -52,6 +68,9 @@ func NewExecutor(version, commit, date string) *Executor { e.debugf("Starting execution...") e.log = report.NewLogWrapper(logutils.NewStderrLog(""), &e.reportData) + if ok := e.acquireFileLock(); !ok { + e.log.Fatalf("Parallel golangci-lint is running") + } // to setup log level early we need to parse config from command line extra time to // find `-v` option @@ -83,6 +102,7 @@ func NewExecutor(version, commit, date string) *Executor { e.initConfig() e.initCompletion() e.initVersion() + e.initCache() // init e.cfg by values from config: flags parse will see these values // like the default ones. 
It will overwrite them only if the same option @@ -118,6 +138,9 @@ func NewExecutor(version, commit, date string) *Executor { e.loadGuard = load.NewGuard() e.contextLoader = lint.NewContextLoader(e.cfg, e.log.Child("loader"), e.goenv, e.lineCache, e.fileCache, e.pkgCache, e.loadGuard) + if err = e.initHashSalt(version); err != nil { + e.log.Fatalf("Failed to init hash salt: %s", err) + } e.debugf("Initialized executor") return e } @@ -125,3 +148,80 @@ func NewExecutor(version, commit, date string) *Executor { func (e *Executor) Execute() error { return e.rootCmd.Execute() } + +func (e *Executor) initHashSalt(version string) error { + binSalt, err := computeBinarySalt(version) + if err != nil { + return errors.Wrap(err, "failed to calculate binary salt") + } + + configSalt, err := computeConfigSalt(e.cfg) + if err != nil { + return errors.Wrap(err, "failed to calculate config salt") + } + + var b bytes.Buffer + b.Write(binSalt) + b.Write(configSalt) + cache.SetSalt(b.Bytes()) + return nil +} + +func computeBinarySalt(version string) ([]byte, error) { + if version != "" && version != "(devel)" { + return []byte(version), nil + } + + if logutils.HaveDebugTag("bin_salt") { + return []byte("debug"), nil + } + + p, err := os.Executable() + if err != nil { + return nil, err + } + f, err := os.Open(p) + if err != nil { + return nil, err + } + defer f.Close() + h := sha256.New() + if _, err := io.Copy(h, f); err != nil { + return nil, err + } + return h.Sum(nil), nil +} + +func computeConfigSalt(cfg *config.Config) ([]byte, error) { + configBytes, err := json.Marshal(cfg) + if err != nil { + return nil, errors.Wrap(err, "failed to json marshal config") + } + + h := sha256.New() + if n, err := h.Write(configBytes); n != len(configBytes) { + return nil, fmt.Errorf("failed to hash config bytes: wrote %d/%d bytes, error: %s", n, len(configBytes), err) + } + return h.Sum(nil), nil +} + +func (e *Executor) acquireFileLock() bool { + lockFile := filepath.Join(os.TempDir(), "golangci-lint.lock") + e.debugf("Locking on file %s...", lockFile) + f := flock.New(lockFile) + ctx, finish := context.WithTimeout(context.Background(), time.Minute) + defer finish() + + if ok, _ := f.TryLockContext(ctx, time.Second*3); !ok { + return false + } + + e.flock = f + return true +} + +func (e *Executor) releaseFileLock() { + if err := e.flock.Unlock(); err != nil { + e.debugf("Failed to unlock on file: %s", err) + } +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/root.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/root.go index 21e7fa2e973..e61ecc94bb5 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/commands/root.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/root.go @@ -73,6 +73,7 @@ func (e *Executor) persistentPostRun(_ *cobra.Command, _ []string) { trace.Stop() } + e.releaseFileLock() os.Exit(e.exitCode) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/run.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/run.go index 7e5f429d1b8..4521a2e8807 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/commands/run.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/run.go @@ -54,6 +54,8 @@ func wh(text string) string { return color.GreenString(text) } +const defaultTimeout = time.Minute + //nolint:funlen func initFlagSet(fs *pflag.FlagSet, cfg *config.Config, m *lintersdb.Manager, isFinalInit bool) { hideFlag := func(name string) { @@ -82,12 +84,17 @@ func initFlagSet(fs *pflag.FlagSet, cfg 
*config.Config, m *lintersdb.Manager, is // Run config rc := &cfg.Run + fs.StringVar(&rc.ModulesDownloadMode, "modules-download-mode", "", + "Modules download mode. If not empty, passed as -mod= to go tools") fs.IntVar(&rc.ExitCodeIfIssuesFound, "issues-exit-code", exitcodes.IssuesFound, wh("Exit code when issues were found")) fs.StringSliceVar(&rc.BuildTags, "build-tags", nil, wh("Build tags")) - fs.DurationVar(&rc.Timeout, "deadline", time.Minute, wh("Deadline for total work")) - hideFlag("deadline") - fs.DurationVar(&rc.Timeout, "timeout", time.Minute, wh("Timeout for total work")) + + fs.DurationVar(&rc.Timeout, "deadline", defaultTimeout, wh("Deadline for total work")) + if err := fs.MarkHidden("deadline"); err != nil { + panic(err) + } + fs.DurationVar(&rc.Timeout, "timeout", defaultTimeout, wh("Timeout for total work")) fs.BoolVar(&rc.AnalyzeTests, "tests", true, wh("Analyze tests (*_test.go)")) fs.BoolVar(&rc.PrintResourcesUsage, "print-resources-usage", false, @@ -166,6 +173,11 @@ func initFlagSet(fs *pflag.FlagSet, cfg *config.Config, m *lintersdb.Manager, is fs.StringSliceVarP(&lc.Enable, "enable", "E", nil, wh("Enable specific linter")) fs.StringSliceVarP(&lc.Disable, "disable", "D", nil, wh("Disable specific linter")) fs.BoolVar(&lc.EnableAll, "enable-all", false, wh("Enable all linters")) + if err := fs.MarkHidden("enable-all"); err != nil { + panic(err) + } + // TODO: run hideFlag("enable-all") to print deprecation message. + fs.BoolVar(&lc.DisableAll, "disable-all", false, wh("Disable all linters")) fs.StringSliceVarP(&lc.Presets, "presets", "p", nil, wh(fmt.Sprintf("Enable presets (%s) of linters. Run 'golangci-lint linters' to see "+ @@ -390,6 +402,7 @@ func (e *Executor) executeRun(_ *cobra.Command, args []string) { } }() + e.setTimeoutToDeadlineIfOnlyDeadlineIsSet() ctx, cancel := context.WithTimeout(context.Background(), e.cfg.Run.Timeout) defer cancel() @@ -411,6 +424,15 @@ func (e *Executor) executeRun(_ *cobra.Command, args []string) { e.setupExitCode(ctx) } +// to be removed when deadline is finally decommissioned +func (e *Executor) setTimeoutToDeadlineIfOnlyDeadlineIsSet() { + //lint:ignore SA1019 We want to promoted the deprecated config value when needed + deadlineValue := e.cfg.Run.Deadline // nolint: staticcheck + if deadlineValue != 0 && e.cfg.Run.Timeout == defaultTimeout { + e.cfg.Run.Timeout = deadlineValue + } +} + func (e *Executor) setupExitCode(ctx context.Context) { if ctx.Err() != nil { e.exitCode = exitcodes.Timeout @@ -448,7 +470,6 @@ func watchResources(ctx context.Context, done chan struct{}, logger logutils.Log const MB = 1024 * 1024 track := func() { - debugf("Starting memory tracing iteration ...") var m runtime.MemStats runtime.ReadMemStats(&m) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/config.go b/vendor/github.com/golangci/golangci-lint/pkg/config/config.go index 90b0669683f..1b8de67585f 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/config/config.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/config/config.go @@ -255,9 +255,11 @@ type GocognitSettings struct { } type WSLSettings struct { - StrictAppend bool `mapstructure:"strict-append"` - AllowAssignAndCallCuddle bool `mapstructure:"allow-assign-and-call"` - AllowMultiLineAssignCuddle bool `mapstructure:"allow-multiline-assign"` + StrictAppend bool `mapstructure:"strict-append"` + AllowAssignAndCallCuddle bool `mapstructure:"allow-assign-and-call"` + AllowMultiLineAssignCuddle bool `mapstructure:"allow-multiline-assign"` + 
AllowCaseTrailingWhitespace bool `mapstructure:"allow-case-trailing-whitespace"` + AllowCuddleDeclaration bool `mapstructure:"allow-cuddle-declarations"` } var defaultLintersSettings = LintersSettings{ @@ -289,9 +291,11 @@ var defaultLintersSettings = LintersSettings{ MinComplexity: 30, }, WSL: WSLSettings{ - StrictAppend: true, - AllowAssignAndCallCuddle: true, - AllowMultiLineAssignCuddle: true, + StrictAppend: true, + AllowAssignAndCallCuddle: true, + AllowMultiLineAssignCuddle: true, + AllowCaseTrailingWhitespace: true, + AllowCuddleDeclaration: false, }, } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/exitcodes/exitcodes.go b/vendor/github.com/golangci/golangci-lint/pkg/exitcodes/exitcodes.go index 7c74a29973a..536f9036142 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/exitcodes/exitcodes.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/exitcodes/exitcodes.go @@ -30,3 +30,5 @@ var ( Code: Failure, } ) + +// 1 diff --git a/vendor/github.com/golangci/golangci-lint/pkg/fsutils/filecache.go b/vendor/github.com/golangci/golangci-lint/pkg/fsutils/filecache.go index b3c282429fc..2b17a039861 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/fsutils/filecache.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/fsutils/filecache.go @@ -33,7 +33,7 @@ func (fc *FileCache) GetFileBytes(filePath string) ([]byte, error) { return fileBytes, nil } -func prettifyBytesCount(n int) string { +func PrettifyBytesCount(n int64) string { const ( Multiplexer = 1024 KiB = 1 * Multiplexer @@ -54,14 +54,14 @@ func prettifyBytesCount(n int) string { } func (fc *FileCache) PrintStats(log logutils.Log) { - var size int + var size int64 var mapLen int fc.files.Range(func(_, fileBytes interface{}) bool { mapLen++ - size += len(fileBytes.([]byte)) + size += int64(len(fileBytes.([]byte))) return true }) - log.Infof("File cache stats: %d entries of total size %s", mapLen, prettifyBytesCount(size)) + log.Infof("File cache stats: %d entries of total size %s", mapLen, PrettifyBytesCount(size)) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/deadcode.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/deadcode.go index 269275ceb59..9889dad4ed6 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/deadcode.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/deadcode.go @@ -15,10 +15,10 @@ import ( func NewDeadcode() *goanalysis.Linter { const linterName = "deadcode" var mu sync.Mutex - var resIssues []result.Issue + var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, Run: func(pass *analysis.Pass) (interface{}, error) { prog := goanalysis.MakeFakeLoaderProgram(pass) @@ -26,13 +26,13 @@ func NewDeadcode() *goanalysis.Linter { if err != nil { return nil, err } - res := make([]result.Issue, 0, len(issues)) + res := make([]goanalysis.Issue, 0, len(issues)) for _, i := range issues { - res = append(res, result.Issue{ + res = append(res, goanalysis.NewIssue(&result.Issue{ //nolint:scopelint Pos: i.Pos, Text: fmt.Sprintf("%s is unused", formatCode(i.UnusedIdentName, nil)), FromLinter: linterName, - }) + }, pass)) } mu.Lock() resIssues = append(resIssues, res...) 
@@ -46,7 +46,7 @@ func NewDeadcode() *goanalysis.Linter { "Finds unused code", []*analysis.Analyzer{analyzer}, nil, - ).WithIssuesReporter(func(*linter.Context) []result.Issue { + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/depguard.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/depguard.go index 1b73fc076aa..611f6d4950b 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/depguard.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/depguard.go @@ -49,10 +49,10 @@ func setupDepguardPackages(dg *depguard.Depguard, lintCtx *linter.Context) { func NewDepguard() *goanalysis.Linter { const linterName = "depguard" var mu sync.Mutex - var resIssues []result.Issue + var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, } return goanalysis.NewLinter( @@ -88,17 +88,17 @@ func NewDepguard() *goanalysis.Linter { if dg.ListType == depguard.LTWhitelist { msgSuffix = "is not in the whitelist" } - res := make([]result.Issue, 0, len(issues)) + res := make([]goanalysis.Issue, 0, len(issues)) for _, i := range issues { userSuppliedMsgSuffix := dgSettings.PackagesWithErrorMessage[i.PackageName] if userSuppliedMsgSuffix != "" { userSuppliedMsgSuffix = ": " + userSuppliedMsgSuffix } - res = append(res, result.Issue{ + res = append(res, goanalysis.NewIssue(&result.Issue{ //nolint:scopelint Pos: i.Position, Text: fmt.Sprintf("%s %s%s", formatCode(i.PackageName, lintCtx.Cfg), msgSuffix, userSuppliedMsgSuffix), FromLinter: linterName, - }) + }, pass)) } mu.Lock() resIssues = append(resIssues, res...) @@ -106,7 +106,7 @@ func NewDepguard() *goanalysis.Linter { return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/dogsled.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/dogsled.go index 4cddf40064f..8978ff913dc 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/dogsled.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/dogsled.go @@ -17,10 +17,10 @@ const dogsledLinterName = "dogsled" func NewDogsled() *goanalysis.Linter { var mu sync.Mutex - var resIssues []result.Issue + var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: dogsledLinterName, Doc: goanalysis.TheOnlyanalyzerDoc, } return goanalysis.NewLinter( @@ -30,14 +30,16 @@ func NewDogsled() *goanalysis.Linter { nil, ).WithContextSetter(func(lintCtx *linter.Context) { analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - var pkgIssues []result.Issue + var pkgIssues []goanalysis.Issue for _, f := range pass.Files { v := returnsVisitor{ maxBlanks: lintCtx.Settings().Dogsled.MaxBlankIdentifiers, f: pass.Fset, } ast.Walk(&v, f) - pkgIssues = append(pkgIssues, v.issues...) 
+ for i := range v.issues { + pkgIssues = append(pkgIssues, goanalysis.NewIssue(&v.issues[i], pass)) + } } mu.Lock() @@ -46,7 +48,7 @@ func NewDogsled() *goanalysis.Linter { return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupl.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupl.go index deb14900680..d6dc67fbb0e 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupl.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupl.go @@ -19,10 +19,10 @@ const duplLinterName = "dupl" func NewDupl() *goanalysis.Linter { var mu sync.Mutex - var resIssues []result.Issue + var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: duplLinterName, Doc: goanalysis.TheOnlyanalyzerDoc, } return goanalysis.NewLinter( @@ -47,7 +47,7 @@ func NewDupl() *goanalysis.Linter { return nil, nil } - res := make([]result.Issue, 0, len(issues)) + res := make([]goanalysis.Issue, 0, len(issues)) for _, i := range issues { toFilename, err := fsutils.ShortestRelPath(i.To.Filename(), "") if err != nil { @@ -57,7 +57,7 @@ func NewDupl() *goanalysis.Linter { text := fmt.Sprintf("%d-%d lines are duplicate of %s", i.From.LineStart(), i.From.LineEnd(), formatCode(dupl, lintCtx.Cfg)) - res = append(res, result.Issue{ + res = append(res, goanalysis.NewIssue(&result.Issue{ //nolint:scopelint Pos: token.Position{ Filename: i.From.Filename(), Line: i.From.LineStart(), @@ -68,7 +68,7 @@ func NewDupl() *goanalysis.Linter { }, Text: text, FromLinter: duplLinterName, - }) + }, pass)) } mu.Lock() @@ -77,7 +77,7 @@ func NewDupl() *goanalysis.Linter { return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/errcheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/errcheck.go index 0e1a94d3f0a..bf6b9a45348 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/errcheck.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/errcheck.go @@ -24,9 +24,9 @@ import ( func NewErrcheck() *goanalysis.Linter { const linterName = "errcheck" var mu sync.Mutex - var res []result.Issue + var res []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, } return goanalysis.NewLinter( @@ -51,7 +51,7 @@ func NewErrcheck() *goanalysis.Linter { return nil, nil } - issues := make([]result.Issue, 0, len(errcheckIssues)) + issues := make([]goanalysis.Issue, 0, len(errcheckIssues)) for _, i := range errcheckIssues { var text string if i.FuncName != "" { @@ -59,18 +59,18 @@ func NewErrcheck() *goanalysis.Linter { } else { text = "Error return value is not checked" } - issues = append(issues, result.Issue{ + issues = append(issues, goanalysis.NewIssue(&result.Issue{ //nolint:scopelint FromLinter: linterName, Text: text, Pos: i.Pos, - }) + }, pass)) } mu.Lock() res = append(res, issues...) 
mu.Unlock() return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return res }).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/funlen.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/funlen.go index d00485f17c0..3031da48350 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/funlen.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/funlen.go @@ -17,10 +17,10 @@ const funlenLinterName = "funlen" func NewFunlen() *goanalysis.Linter { var mu sync.Mutex - var resIssues []result.Issue + var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: funlenLinterName, Doc: goanalysis.TheOnlyanalyzerDoc, } return goanalysis.NewLinter( @@ -40,16 +40,16 @@ func NewFunlen() *goanalysis.Linter { return nil, nil } - res := make([]result.Issue, len(issues)) + res := make([]goanalysis.Issue, len(issues)) for k, i := range issues { - res[k] = result.Issue{ + res[k] = goanalysis.NewIssue(&result.Issue{ //nolint:scopelint Pos: token.Position{ Filename: i.Pos.Filename, Line: i.Pos.Line, }, Text: strings.TrimRight(i.Message, "\n"), FromLinter: funlenLinterName, - } + }, pass) } mu.Lock() @@ -58,7 +58,7 @@ func NewFunlen() *goanalysis.Linter { return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/issue.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/issue.go new file mode 100644 index 00000000000..b90a2912b9a --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/issue.go @@ -0,0 +1,29 @@ +package goanalysis + +import ( + "go/token" + + "golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/result" +) + +type Issue struct { + result.Issue + Pass *analysis.Pass +} + +func NewIssue(i *result.Issue, pass *analysis.Pass) Issue { + return Issue{ + Issue: *i, + Pass: pass, + } +} + +type EncodingIssue struct { + FromLinter string + Text string + Pos token.Position + LineRange *result.Range + Replacement *result.Replacement +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/linter.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/linter.go index ca35400ec81..bad7ab4ffbb 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/linter.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/linter.go @@ -4,7 +4,17 @@ import ( "context" "flag" "fmt" + "runtime" + "sort" "strings" + "sync" + "sync/atomic" + "time" + + "github.com/golangci/golangci-lint/pkg/timeutils" + + "github.com/golangci/golangci-lint/internal/pkgcache" + "github.com/golangci/golangci-lint/pkg/logutils" "golang.org/x/tools/go/packages" @@ -30,6 +40,8 @@ const ( LoadModeWholeProgram ) +var issuesCacheDebugf = logutils.Debug("goanalysis/issues/cache") + func (loadMode LoadMode) String() string { switch loadMode { case LoadModeNone: @@ -45,14 +57,14 @@ func (loadMode LoadMode) String() string { } type Linter struct { - name, desc string - analyzers []*analysis.Analyzer - cfg map[string]map[string]interface{} - issuesReporter func(*linter.Context) []result.Issue - 
contextSetter func(*linter.Context) - loadMode LoadMode - useOriginalPackages bool - isTypecheckMode bool + name, desc string + analyzers []*analysis.Analyzer + cfg map[string]map[string]interface{} + issuesReporter func(*linter.Context) []Issue + contextSetter func(*linter.Context) + loadMode LoadMode + needUseOriginalPackages bool + isTypecheckModeOn bool } func NewLinter(name, desc string, analyzers []*analysis.Analyzer, cfg map[string]map[string]interface{}) *Linter { @@ -60,11 +72,11 @@ func NewLinter(name, desc string, analyzers []*analysis.Analyzer, cfg map[string } func (lnt *Linter) UseOriginalPackages() { - lnt.useOriginalPackages = true + lnt.needUseOriginalPackages = true } func (lnt *Linter) SetTypecheckMode() { - lnt.isTypecheckMode = true + lnt.isTypecheckModeOn = true } func (lnt *Linter) LoadMode() LoadMode { @@ -76,7 +88,7 @@ func (lnt *Linter) WithLoadMode(loadMode LoadMode) *Linter { return lnt } -func (lnt *Linter) WithIssuesReporter(r func(*linter.Context) []result.Issue) *Linter { +func (lnt *Linter) WithIssuesReporter(r func(*linter.Context) []Issue) *Linter { lnt.issuesReporter = r return lnt } @@ -198,6 +210,7 @@ func buildIssuesFromErrorsForTypecheckMode(errs []error, lintCtx *linter.Context uniqReportedIssues[err.Msg] = true lintCtx.Log.Errorf("typechecking error: %s", err.Msg) } else { + i.Pkg = itErr.Pkg // to save to cache later issues = append(issues, *i) } } @@ -209,48 +222,270 @@ func buildIssues(diags []Diagnostic, linterNameBuilder func(diag *Diagnostic) st var issues []result.Issue for i := range diags { diag := &diags[i] + linterName := linterNameBuilder(diag) var text string - if diag.Analyzer.Name == TheOnlyAnalyzerName { + if diag.Analyzer.Name == linterName { text = diag.Message } else { text = fmt.Sprintf("%s: %s", diag.Analyzer.Name, diag.Message) } issues = append(issues, result.Issue{ - FromLinter: linterNameBuilder(diag), + FromLinter: linterName, Text: text, Pos: diag.Position, + Pkg: diag.Pkg, }) } return issues } -func (lnt *Linter) Run(ctx context.Context, lintCtx *linter.Context) ([]result.Issue, error) { +func (lnt *Linter) preRun(lintCtx *linter.Context) error { if err := analysis.Validate(lnt.analyzers); err != nil { - return nil, errors.Wrap(err, "failed to validate analyzers") + return errors.Wrap(err, "failed to validate analyzers") } if err := lnt.configure(); err != nil { - return nil, errors.Wrap(err, "failed to configure analyzers") + return errors.Wrap(err, "failed to configure analyzers") } if lnt.contextSetter != nil { lnt.contextSetter(lintCtx) } - loadMode := lnt.loadMode - runner := newRunner(lnt.name, lintCtx.Log.Child("goanalysis"), - lintCtx.PkgCache, lintCtx.LoadGuard, loadMode) + return nil +} + +func (lnt *Linter) getName() string { + return lnt.name +} + +func (lnt *Linter) getLinterNameForDiagnostic(*Diagnostic) string { + return lnt.name +} + +func (lnt *Linter) getAnalyzers() []*analysis.Analyzer { + return lnt.analyzers +} + +func (lnt *Linter) useOriginalPackages() bool { + return lnt.needUseOriginalPackages +} + +func (lnt *Linter) isTypecheckMode() bool { + return lnt.isTypecheckModeOn +} + +func (lnt *Linter) reportIssues(lintCtx *linter.Context) []Issue { + if lnt.issuesReporter != nil { + return lnt.issuesReporter(lintCtx) + } + return nil +} + +func (lnt *Linter) getLoadMode() LoadMode { + return lnt.loadMode +} + +type runAnalyzersConfig interface { + getName() string + getLinterNameForDiagnostic(*Diagnostic) string + getAnalyzers() []*analysis.Analyzer + useOriginalPackages() bool + isTypecheckMode() 
bool + reportIssues(*linter.Context) []Issue + getLoadMode() LoadMode +} + +func getIssuesCacheKey(analyzers []*analysis.Analyzer) string { + return "lint/result:" + analyzersHashID(analyzers) +} + +func saveIssuesToCache(allPkgs []*packages.Package, pkgsFromCache map[*packages.Package]bool, + issues []result.Issue, lintCtx *linter.Context, analyzers []*analysis.Analyzer) { + startedAt := time.Now() + perPkgIssues := map[*packages.Package][]result.Issue{} + for ind := range issues { + i := &issues[ind] + perPkgIssues[i.Pkg] = append(perPkgIssues[i.Pkg], *i) + } + + savedIssuesCount := int32(0) + lintResKey := getIssuesCacheKey(analyzers) + + workerCount := runtime.GOMAXPROCS(-1) + var wg sync.WaitGroup + wg.Add(workerCount) + + pkgCh := make(chan *packages.Package, len(allPkgs)) + for i := 0; i < workerCount; i++ { + go func() { + defer wg.Done() + for pkg := range pkgCh { + pkgIssues := perPkgIssues[pkg] + encodedIssues := make([]EncodingIssue, 0, len(pkgIssues)) + for ind := range pkgIssues { + i := &pkgIssues[ind] + encodedIssues = append(encodedIssues, EncodingIssue{ + FromLinter: i.FromLinter, + Text: i.Text, + Pos: i.Pos, + LineRange: i.LineRange, + Replacement: i.Replacement, + }) + } + + atomic.AddInt32(&savedIssuesCount, int32(len(encodedIssues))) + if err := lintCtx.PkgCache.Put(pkg, pkgcache.HashModeNeedAllDeps, lintResKey, encodedIssues); err != nil { + lintCtx.Log.Infof("Failed to save package %s issues (%d) to cache: %s", pkg, len(pkgIssues), err) + } else { + issuesCacheDebugf("Saved package %s issues (%d) to cache", pkg, len(pkgIssues)) + } + } + }() + } + + for _, pkg := range allPkgs { + if pkgsFromCache[pkg] { + continue + } + + pkgCh <- pkg + } + close(pkgCh) + wg.Wait() + + issuesCacheDebugf("Saved %d issues from %d packages to cache in %s", savedIssuesCount, len(allPkgs), time.Since(startedAt)) +} + +//nolint:gocritic +func loadIssuesFromCache(pkgs []*packages.Package, lintCtx *linter.Context, + analyzers []*analysis.Analyzer) ([]result.Issue, map[*packages.Package]bool) { + startedAt := time.Now() + + lintResKey := getIssuesCacheKey(analyzers) + type cacheRes struct { + issues []result.Issue + loadErr error + } + pkgToCacheRes := make(map[*packages.Package]*cacheRes, len(pkgs)) + for _, pkg := range pkgs { + pkgToCacheRes[pkg] = &cacheRes{} + } + + workerCount := runtime.GOMAXPROCS(-1) + var wg sync.WaitGroup + wg.Add(workerCount) + + pkgCh := make(chan *packages.Package, len(pkgs)) + for i := 0; i < workerCount; i++ { + go func() { + defer wg.Done() + for pkg := range pkgCh { + var pkgIssues []EncodingIssue + err := lintCtx.PkgCache.Get(pkg, pkgcache.HashModeNeedAllDeps, lintResKey, &pkgIssues) + cacheRes := pkgToCacheRes[pkg] + cacheRes.loadErr = err + if err != nil { + continue + } + if len(pkgIssues) == 0 { + continue + } + + issues := make([]result.Issue, 0, len(pkgIssues)) + for _, i := range pkgIssues { + issues = append(issues, result.Issue{ + FromLinter: i.FromLinter, + Text: i.Text, + Pos: i.Pos, + LineRange: i.LineRange, + Replacement: i.Replacement, + Pkg: pkg, + }) + } + cacheRes.issues = issues + } + }() + } + + for _, pkg := range pkgs { + pkgCh <- pkg + } + close(pkgCh) + wg.Wait() + + loadedIssuesCount := 0 + var issues []result.Issue + pkgsFromCache := map[*packages.Package]bool{} + for pkg, cacheRes := range pkgToCacheRes { + if cacheRes.loadErr == nil { + loadedIssuesCount += len(cacheRes.issues) + pkgsFromCache[pkg] = true + issues = append(issues, cacheRes.issues...) 
+ issuesCacheDebugf("Loaded package %s issues (%d) from cache", pkg, len(cacheRes.issues)) + } else { + issuesCacheDebugf("Didn't load package %s issues from cache: %s", pkg, cacheRes.loadErr) + } + } + issuesCacheDebugf("Loaded %d issues from cache in %s, analyzing %d/%d packages", + loadedIssuesCount, time.Since(startedAt), len(pkgs)-len(pkgsFromCache), len(pkgs)) + return issues, pkgsFromCache +} + +func runAnalyzers(cfg runAnalyzersConfig, lintCtx *linter.Context) ([]result.Issue, error) { + log := lintCtx.Log.Child("goanalysis") + sw := timeutils.NewStopwatch("analyzers", log) + defer sw.PrintTopStages(10) + + runner := newRunner(cfg.getName(), log, lintCtx.PkgCache, lintCtx.LoadGuard, cfg.getLoadMode(), sw) pkgs := lintCtx.Packages - if lnt.useOriginalPackages { + if cfg.useOriginalPackages() { pkgs = lintCtx.OriginalPackages } - diags, errs := runner.run(lnt.analyzers, pkgs) + issues, pkgsFromCache := loadIssuesFromCache(pkgs, lintCtx, cfg.getAnalyzers()) + var pkgsToAnalyze []*packages.Package + for _, pkg := range pkgs { + if !pkgsFromCache[pkg] { + pkgsToAnalyze = append(pkgsToAnalyze, pkg) + } + } - linterNameBuilder := func(*Diagnostic) string { return lnt.Name() } - if lnt.isTypecheckMode { - return buildIssuesFromErrorsForTypecheckMode(errs, lintCtx) + diags, errs, passToPkg := runner.run(cfg.getAnalyzers(), pkgsToAnalyze) + + defer func() { + if len(errs) == 0 { + // If we try to save to cache even if we have compilation errors + // we won't see them on repeated runs. + saveIssuesToCache(pkgs, pkgsFromCache, issues, lintCtx, cfg.getAnalyzers()) + } + }() + + buildAllIssues := func() []result.Issue { + var retIssues []result.Issue + reportedIssues := cfg.reportIssues(lintCtx) + for i := range reportedIssues { + issue := &reportedIssues[i].Issue + if issue.Pkg == nil { + issue.Pkg = passToPkg[reportedIssues[i].Pass] + } + retIssues = append(retIssues, *issue) + } + retIssues = append(retIssues, buildIssues(diags, cfg.getLinterNameForDiagnostic)...) + return retIssues + } + + if cfg.isTypecheckMode() { + errIssues, err := buildIssuesFromErrorsForTypecheckMode(errs, lintCtx) + if err != nil { + return nil, err + } + + issues = append(issues, errIssues...) + issues = append(issues, buildAllIssues()...) + + return issues, nil } // Don't print all errs: they can duplicate. @@ -258,12 +493,24 @@ func (lnt *Linter) Run(ctx context.Context, lintCtx *linter.Context) ([]result.I return nil, errs[0] } - var issues []result.Issue - if lnt.issuesReporter != nil { - issues = append(issues, lnt.issuesReporter(lintCtx)...) - } else { - issues = buildIssues(diags, linterNameBuilder) + issues = append(issues, buildAllIssues()...) 
+ return issues, nil +} + +func (lnt *Linter) Run(ctx context.Context, lintCtx *linter.Context) ([]result.Issue, error) { + if err := lnt.preRun(lintCtx); err != nil { + return nil, err } - return issues, nil + return runAnalyzers(lnt, lintCtx) +} + +func analyzersHashID(analyzers []*analysis.Analyzer) string { + names := make([]string, 0, len(analyzers)) + for _, a := range analyzers { + names = append(names, a.Name) + } + + sort.Strings(names) + return strings.Join(names, ",") } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/metalinter.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/metalinter.go index 96ea36a4b20..5975e2057a1 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/metalinter.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/metalinter.go @@ -11,11 +11,14 @@ import ( ) type MetaLinter struct { - linters []*Linter + linters []*Linter + analyzerToLinterName map[*analysis.Analyzer]string } func NewMetaLinter(linters []*Linter) *MetaLinter { - return &MetaLinter{linters: linters} + ml := &MetaLinter{linters: linters} + ml.analyzerToLinterName = ml.getAnalyzerToLinterNameMapping() + return ml } func (ml MetaLinter) Name() string { @@ -28,7 +31,7 @@ func (ml MetaLinter) Desc() string { func (ml MetaLinter) isTypecheckMode() bool { for _, linter := range ml.linters { - if linter.isTypecheckMode { + if linter.isTypecheckMode() { return true } } @@ -45,15 +48,7 @@ func (ml MetaLinter) getLoadMode() LoadMode { return loadMode } -func (ml MetaLinter) runContextSetters(lintCtx *linter.Context) { - for _, linter := range ml.linters { - if linter.contextSetter != nil { - linter.contextSetter(lintCtx) - } - } -} - -func (ml MetaLinter) getAllAnalyzers() []*analysis.Analyzer { +func (ml MetaLinter) getAnalyzers() []*analysis.Analyzer { var allAnalyzers []*analysis.Analyzer for _, linter := range ml.linters { allAnalyzers = append(allAnalyzers, linter.analyzers...) @@ -61,75 +56,44 @@ func (ml MetaLinter) getAllAnalyzers() []*analysis.Analyzer { return allAnalyzers } -func (ml MetaLinter) getAnalyzerToLinterNameMapping() map[*analysis.Analyzer]string { - analyzerToLinterName := map[*analysis.Analyzer]string{} - for _, linter := range ml.linters { - for _, a := range linter.analyzers { - analyzerToLinterName[a] = linter.Name() - } - } - return analyzerToLinterName +func (ml MetaLinter) getName() string { + return "metalinter" } -func (ml MetaLinter) configure() error { - for _, linter := range ml.linters { - if err := linter.configure(); err != nil { - return errors.Wrapf(err, "failed to configure analyzers of %s", linter.Name()) - } - } - return nil +func (ml MetaLinter) useOriginalPackages() bool { + return false // `unused` can't be run by this metalinter } -func (ml MetaLinter) validate() error { - for _, linter := range ml.linters { - if err := analysis.Validate(linter.analyzers); err != nil { - return errors.Wrapf(err, "failed to validate analyzers of %s", linter.Name()) +func (ml MetaLinter) reportIssues(lintCtx *linter.Context) []Issue { + var ret []Issue + for _, lnt := range ml.linters { + if lnt.issuesReporter != nil { + ret = append(ret, lnt.issuesReporter(lintCtx)...) 
} } - return nil + return ret } -func (ml MetaLinter) Run(ctx context.Context, lintCtx *linter.Context) ([]result.Issue, error) { - if err := ml.validate(); err != nil { - return nil, err - } - - if err := ml.configure(); err != nil { - return nil, err - } - ml.runContextSetters(lintCtx) - - analyzerToLinterName := ml.getAnalyzerToLinterNameMapping() - - runner := newRunner("metalinter", lintCtx.Log.Child("goanalysis"), - lintCtx.PkgCache, lintCtx.LoadGuard, ml.getLoadMode()) - - diags, errs := runner.run(ml.getAllAnalyzers(), lintCtx.Packages) - - buildAllIssues := func() []result.Issue { - linterNameBuilder := func(diag *Diagnostic) string { return analyzerToLinterName[diag.Analyzer] } - issues := buildIssues(diags, linterNameBuilder) +func (ml MetaLinter) getLinterNameForDiagnostic(diag *Diagnostic) string { + return ml.analyzerToLinterName[diag.Analyzer] +} - for _, linter := range ml.linters { - if linter.issuesReporter != nil { - issues = append(issues, linter.issuesReporter(lintCtx)...) - } +func (ml MetaLinter) getAnalyzerToLinterNameMapping() map[*analysis.Analyzer]string { + analyzerToLinterName := map[*analysis.Analyzer]string{} + for _, linter := range ml.linters { + for _, a := range linter.analyzers { + analyzerToLinterName[a] = linter.Name() } - return issues } + return analyzerToLinterName +} - if ml.isTypecheckMode() { - issues, err := buildIssuesFromErrorsForTypecheckMode(errs, lintCtx) - if err != nil { - return nil, err +func (ml MetaLinter) Run(ctx context.Context, lintCtx *linter.Context) ([]result.Issue, error) { + for _, linter := range ml.linters { + if err := linter.preRun(lintCtx); err != nil { + return nil, errors.Wrapf(err, "failed to pre-run %s", linter.Name()) } - return append(issues, buildAllIssues()...), nil - } - - // Don't print all errs: they can duplicate. 
- if len(errs) != 0 { - return nil, errs[0] } - return buildAllIssues(), nil + return runAnalyzers(ml, lintCtx) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runner.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runner.go index e194a811c2d..5d8470dbd04 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runner.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runner.go @@ -28,6 +28,8 @@ import ( "sync/atomic" "time" + "github.com/golangci/golangci-lint/pkg/timeutils" + "github.com/pkg/errors" "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/gcexportdata" @@ -69,23 +71,30 @@ type Diagnostic struct { analysis.Diagnostic Analyzer *analysis.Analyzer Position token.Position + Pkg *packages.Package } type runner struct { - log logutils.Log - prefix string // ensure unique analyzer names - pkgCache *pkgcache.Cache - loadGuard *load.Guard - loadMode LoadMode + log logutils.Log + prefix string // ensure unique analyzer names + pkgCache *pkgcache.Cache + loadGuard *load.Guard + loadMode LoadMode + passToPkg map[*analysis.Pass]*packages.Package + passToPkgGuard sync.Mutex + sw *timeutils.Stopwatch } -func newRunner(prefix string, logger logutils.Log, pkgCache *pkgcache.Cache, loadGuard *load.Guard, loadMode LoadMode) *runner { +func newRunner(prefix string, logger logutils.Log, pkgCache *pkgcache.Cache, loadGuard *load.Guard, + loadMode LoadMode, sw *timeutils.Stopwatch) *runner { return &runner{ prefix: prefix, log: logger, pkgCache: pkgCache, loadGuard: loadGuard, loadMode: loadMode, + passToPkg: map[*analysis.Pass]*packages.Package{}, + sw: sw, } } @@ -95,12 +104,14 @@ func newRunner(prefix string, logger logutils.Log, pkgCache *pkgcache.Cache, loa // It provides most of the logic for the main functions of both the // singlechecker and the multi-analysis commands. // It returns the appropriate exit code. 
-func (r *runner) run(analyzers []*analysis.Analyzer, initialPackages []*packages.Package) ([]Diagnostic, []error) { +func (r *runner) run(analyzers []*analysis.Analyzer, initialPackages []*packages.Package) ([]Diagnostic, + []error, map[*analysis.Pass]*packages.Package) { debugf("Analyzing %d packages on load mode %s", len(initialPackages), r.loadMode) defer r.pkgCache.Trim() roots := r.analyze(initialPackages, analyzers) - return extractDiagnostics(roots) + diags, errs := extractDiagnostics(roots) + return diags, errs, r.passToPkg } type actKey struct { @@ -138,9 +149,7 @@ func (r *runner) makeAction(a *analysis.Analyzer, pkg *packages.Package, act = actAlloc.alloc() act.a = a act.pkg = pkg - act.log = r.log - act.prefix = r.prefix - act.pkgCache = r.pkgCache + act.r = r act.isInitialPkg = initialPkgs[pkg] act.needAnalyzeSource = initialPkgs[pkg] act.analysisDoneCh = make(chan struct{}) @@ -362,7 +371,13 @@ func extractDiagnostics(roots []*action) (retDiags []Diagnostic, retErrors []err } seen[k] = true - retDiags = append(retDiags, Diagnostic{Diagnostic: diag, Analyzer: act.a, Position: posn}) + retDiag := Diagnostic{ + Diagnostic: diag, + Analyzer: act.a, + Position: posn, + Pkg: act.pkg, + } + retDiags = append(retDiags, retDiag) } } } @@ -404,9 +419,7 @@ type action struct { result interface{} diagnostics []analysis.Diagnostic err error - log logutils.Log - prefix string - pkgCache *pkgcache.Cache + r *runner analysisDoneCh chan struct{} loadCachedFactsDone bool loadCachedFactsOk bool @@ -473,7 +486,9 @@ func (act *action) analyzeSafe() { act.a.Name, act.pkg.Name, act.isInitialPkg, act.needAnalyzeSource, p), debug.Stack()) } }() - act.analyze() + act.r.sw.TrackStage(act.a.Name, func() { + act.analyze() + }) } func (act *action) analyze() { @@ -483,22 +498,8 @@ func (act *action) analyze() { return } - // TODO(adonovan): uncomment this during profiling. - // It won't build pre-go1.11 but conditional compilation - // using build tags isn't warranted. - // - // ctx, task := trace.NewTask(context.Background(), "exec") - // trace.Log(ctx, "pass", act.String()) - // defer task.End() - - // Record time spent in this node but not its dependencies. - // In parallel mode, due to GC/scheduler contention, the - // time is 5x higher than in sequential mode, even with a - // semaphore limiting the number of threads here. - // So use -debug=tp. - defer func(now time.Time) { - analyzeDebugf("go/analysis: %s: %s: analyzed package %q in %s", act.prefix, act.a.Name, act.pkg.Name, time.Since(now)) + analyzeDebugf("go/analysis: %s: %s: analyzed package %q in %s", act.r.prefix, act.a.Name, act.pkg.Name, time.Since(now)) }(time.Now()) // Report an error if any dependency failed. 
@@ -552,6 +553,9 @@ func (act *action) analyze() { AllPackageFacts: act.allPackageFacts, } act.pass = pass + act.r.passToPkgGuard.Lock() + act.r.passToPkg[pass] = act.pkg + act.r.passToPkgGuard.Unlock() var err error if act.pkg.IllTyped { @@ -574,7 +578,7 @@ func (act *action) analyze() { pass.ExportPackageFact = nil if err := act.persistFactsToCache(); err != nil { - act.log.Warnf("Failed to persist facts to cache: %s", err) + act.r.log.Warnf("Failed to persist facts to cache: %s", err) } } @@ -598,7 +602,7 @@ func inheritFacts(act, dep *action) { var err error fact, err = codeFact(fact) if err != nil { - act.log.Panicf("internal error: encoding of %T fact failed in %v", fact, act) + act.r.log.Panicf("internal error: encoding of %T fact failed in %v", fact, act) } } @@ -618,7 +622,7 @@ func inheritFacts(act, dep *action) { var err error fact, err = codeFact(fact) if err != nil { - act.log.Panicf("internal error: encoding of %T fact failed in %v", fact, act) + act.r.log.Panicf("internal error: encoding of %T fact failed in %v", fact, act) } } @@ -695,7 +699,7 @@ func (act *action) importObjectFact(obj types.Object, ptr analysis.Fact) bool { // exportObjectFact implements Pass.ExportObjectFact. func (act *action) exportObjectFact(obj types.Object, fact analysis.Fact) { if obj.Pkg() != act.pkg.Types { - act.log.Panicf("internal error: in analysis %s of package %s: Fact.Set(%s, %T): can't set facts on objects belonging another package", + act.r.log.Panicf("internal error: in analysis %s of package %s: Fact.Set(%s, %T): can't set facts on objects belonging another package", act.a, act.pkg, obj, fact) } @@ -756,7 +760,7 @@ func (act *action) allPackageFacts() []analysis.PackageFact { func (act *action) factType(fact analysis.Fact) reflect.Type { t := reflect.TypeOf(fact) if t.Kind() != reflect.Ptr { - act.log.Fatalf("invalid Fact type: got %T, want pointer", t) + act.r.log.Fatalf("invalid Fact type: got %T, want pointer", t) } return t } @@ -806,15 +810,15 @@ func (act *action) persistFactsToCache() error { factsCacheDebugf("Caching %d facts for package %q and analyzer %s", len(facts), act.pkg.Name, act.a.Name) key := fmt.Sprintf("%s/facts", analyzer.Name) - return act.pkgCache.Put(act.pkg, key, facts) + return act.r.pkgCache.Put(act.pkg, pkgcache.HashModeNeedAllDeps, key, facts) } func (act *action) loadPersistedFacts() bool { var facts []Fact key := fmt.Sprintf("%s/facts", act.a.Name) - if err := act.pkgCache.Get(act.pkg, key, &facts); err != nil { + if err := act.r.pkgCache.Get(act.pkg, pkgcache.HashModeNeedAllDeps, key, &facts); err != nil { if err != pkgcache.ErrMissing { - act.log.Warnf("Failed to get persisted facts: %s", err) + act.r.log.Warnf("Failed to get persisted facts: %s", err) } factsCacheDebugf("No cached facts for package %q and analyzer %s", act.pkg.Name, act.a.Name) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoglobals.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoglobals.go index 244ebfc3acd..f2166416be3 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoglobals.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoglobals.go @@ -19,15 +19,18 @@ const gochecknoglobalsName = "gochecknoglobals" //nolint:dupl func NewGochecknoglobals() *goanalysis.Linter { var mu sync.Mutex - var resIssues []result.Issue + var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: gochecknoglobalsName, Doc: 
goanalysis.TheOnlyanalyzerDoc, Run: func(pass *analysis.Pass) (interface{}, error) { - var res []result.Issue + var res []goanalysis.Issue for _, file := range pass.Files { - res = append(res, checkFileForGlobals(file, pass.Fset)...) + fileIssues := checkFileForGlobals(file, pass.Fset) + for i := range fileIssues { + res = append(res, goanalysis.NewIssue(&fileIssues[i], pass)) + } } if len(res) == 0 { return nil, nil @@ -45,7 +48,7 @@ func NewGochecknoglobals() *goanalysis.Linter { "Checks that no globals are present in Go code", []*analysis.Analyzer{analyzer}, nil, - ).WithIssuesReporter(func(*linter.Context) []result.Issue { + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoinits.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoinits.go index 7d9140807ba..18465b13068 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoinits.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoinits.go @@ -18,15 +18,18 @@ const gochecknoinitsName = "gochecknoinits" //nolint:dupl func NewGochecknoinits() *goanalysis.Linter { var mu sync.Mutex - var resIssues []result.Issue + var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: gochecknoinitsName, Doc: goanalysis.TheOnlyanalyzerDoc, Run: func(pass *analysis.Pass) (interface{}, error) { - var res []result.Issue + var res []goanalysis.Issue for _, file := range pass.Files { - res = append(res, checkFileForInits(file, pass.Fset)...) + fileIssues := checkFileForInits(file, pass.Fset) + for i := range fileIssues { + res = append(res, goanalysis.NewIssue(&fileIssues[i], pass)) + } } if len(res) == 0 { return nil, nil @@ -44,7 +47,7 @@ func NewGochecknoinits() *goanalysis.Linter { "Checks that no init functions are present in Go code", []*analysis.Analyzer{analyzer}, nil, - ).WithIssuesReporter(func(*linter.Context) []result.Issue { + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocognit.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocognit.go index 8a3b65cd28d..71301180874 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocognit.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocognit.go @@ -18,7 +18,7 @@ const gocognitName = "gocognit" func NewGocognit() *goanalysis.Linter { var mu sync.Mutex - var resIssues []result.Issue + var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ Name: goanalysis.TheOnlyAnalyzerName, @@ -43,18 +43,18 @@ func NewGocognit() *goanalysis.Linter { return stats[i].Complexity > stats[j].Complexity }) - res := make([]result.Issue, 0, len(stats)) + res := make([]goanalysis.Issue, 0, len(stats)) for _, s := range stats { if s.Complexity <= lintCtx.Settings().Gocognit.MinComplexity { break // Break as the stats is already sorted from greatest to least } - res = append(res, result.Issue{ + res = append(res, goanalysis.NewIssue(&result.Issue{ //nolint:scopelint Pos: s.Pos, Text: fmt.Sprintf("cognitive complexity %d of func %s is high (> %d)", s.Complexity, formatCode(s.FuncName, lintCtx.Cfg), lintCtx.Settings().Gocognit.MinComplexity), FromLinter: gocognitName, - }) + }, pass)) } mu.Lock() @@ -63,7 +63,7 @@ func NewGocognit() 
*goanalysis.Linter { return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goconst.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goconst.go index 00787c8cbcb..c77dc64e839 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goconst.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goconst.go @@ -16,10 +16,10 @@ const goconstName = "goconst" func NewGoconst() *goanalysis.Linter { var mu sync.Mutex - var resIssues []result.Issue + var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: goconstName, Doc: goanalysis.TheOnlyanalyzerDoc, } return goanalysis.NewLinter( @@ -40,12 +40,12 @@ func NewGoconst() *goanalysis.Linter { return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } -func checkConstants(pass *analysis.Pass, lintCtx *linter.Context) ([]result.Issue, error) { +func checkConstants(pass *analysis.Pass, lintCtx *linter.Context) ([]goanalysis.Issue, error) { cfg := goconstAPI.Config{ MatchWithConstants: true, MinStringLength: lintCtx.Settings().Goconst.MinStringLen, @@ -61,7 +61,7 @@ func checkConstants(pass *analysis.Pass, lintCtx *linter.Context) ([]result.Issu return nil, nil } - res := make([]result.Issue, 0, len(goconstIssues)) + res := make([]goanalysis.Issue, 0, len(goconstIssues)) for _, i := range goconstIssues { textBegin := fmt.Sprintf("string %s has %d occurrences", formatCode(i.Str, lintCtx.Cfg), i.OccurencesCount) var textEnd string @@ -70,11 +70,11 @@ func checkConstants(pass *analysis.Pass, lintCtx *linter.Context) ([]result.Issu } else { textEnd = fmt.Sprintf(", but such constant %s already exists", formatCode(i.MatchingConst, lintCtx.Cfg)) } - res = append(res, result.Issue{ + res = append(res, goanalysis.NewIssue(&result.Issue{ //nolint:scopelint Pos: i.Pos, Text: textBegin + textEnd, FromLinter: goconstName, - }) + }, pass)) } return res, nil diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocritic.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocritic.go index d040c44dca1..fb29252096b 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocritic.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocritic.go @@ -23,12 +23,12 @@ const gocriticName = "gocritic" func NewGocritic() *goanalysis.Linter { var mu sync.Mutex - var resIssues []result.Issue + var resIssues []goanalysis.Issue sizes := types.SizesFor("gc", runtime.GOARCH) analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: gocriticName, Doc: goanalysis.TheOnlyanalyzerDoc, } return goanalysis.NewLinter( @@ -45,7 +45,11 @@ func NewGocritic() *goanalysis.Linter { } lintpackCtx.SetPackageInfo(pass.TypesInfo, pass.Pkg) - res := runGocriticOnPackage(lintpackCtx, enabledCheckers, pass.Files) + var res []goanalysis.Issue + pkgIssues := runGocriticOnPackage(lintpackCtx, enabledCheckers, pass.Files) + for i := range pkgIssues { + res = append(res, goanalysis.NewIssue(&pkgIssues[i], pass)) + } if len(res) == 0 { return nil, nil } @@ -56,7 +60,7 @@ func NewGocritic() *goanalysis.Linter { return nil, nil } - 
}).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocyclo.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocyclo.go index 8e11775b2f8..4fb288989be 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocyclo.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocyclo.go @@ -18,10 +18,10 @@ const gocycloName = "gocyclo" func NewGocyclo() *goanalysis.Linter { var mu sync.Mutex - var resIssues []result.Issue + var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: gocycloName, Doc: goanalysis.TheOnlyanalyzerDoc, } return goanalysis.NewLinter( @@ -43,18 +43,18 @@ func NewGocyclo() *goanalysis.Linter { return stats[i].Complexity > stats[j].Complexity }) - res := make([]result.Issue, 0, len(stats)) + res := make([]goanalysis.Issue, 0, len(stats)) for _, s := range stats { if s.Complexity <= lintCtx.Settings().Gocyclo.MinComplexity { break // Break as the stats is already sorted from greatest to least } - res = append(res, result.Issue{ + res = append(res, goanalysis.NewIssue(&result.Issue{ //nolint:scopelint Pos: s.Pos, Text: fmt.Sprintf("cyclomatic complexity %d of func %s is high (> %d)", s.Complexity, formatCode(s.FuncName, lintCtx.Cfg), lintCtx.Settings().Gocyclo.MinComplexity), FromLinter: gocycloName, - }) + }, pass)) } mu.Lock() @@ -63,7 +63,7 @@ func NewGocyclo() *goanalysis.Linter { return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/godox.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/godox.go index 0bde9ff2864..78d13f3bd4c 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/godox.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/godox.go @@ -17,10 +17,10 @@ const godoxName = "godox" func NewGodox() *goanalysis.Linter { var mu sync.Mutex - var resIssues []result.Issue + var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: godoxName, Doc: goanalysis.TheOnlyanalyzerDoc, } return goanalysis.NewLinter( @@ -39,16 +39,16 @@ func NewGodox() *goanalysis.Linter { return nil, nil } - res := make([]result.Issue, len(issues)) + res := make([]goanalysis.Issue, len(issues)) for k, i := range issues { - res[k] = result.Issue{ + res[k] = goanalysis.NewIssue(&result.Issue{ //nolint:scopelint Pos: token.Position{ Filename: i.Pos.Filename, Line: i.Pos.Line, }, Text: strings.TrimRight(i.Message, "\n"), FromLinter: godoxName, - } + }, pass) } mu.Lock() @@ -57,7 +57,7 @@ func NewGodox() *goanalysis.Linter { return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt.go index e3e3c44f47f..e71f27bb171 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt.go @@ -9,17 +9,16 @@ 
import ( "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" ) const gofmtName = "gofmt" func NewGofmt() *goanalysis.Linter { var mu sync.Mutex - var resIssues []result.Issue + var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: gofmtName, Doc: goanalysis.TheOnlyanalyzerDoc, } return goanalysis.NewLinter( @@ -36,7 +35,7 @@ func NewGofmt() *goanalysis.Linter { fileNames = append(fileNames, pos.Filename) } - var issues []result.Issue + var issues []goanalysis.Issue for _, f := range fileNames { diff, err := gofmtAPI.Run(f, lintCtx.Settings().Gofmt.Simplify) @@ -52,7 +51,9 @@ func NewGofmt() *goanalysis.Linter { return nil, errors.Wrapf(err, "can't extract issues from gofmt diff output %q", string(diff)) } - issues = append(issues, is...) + for i := range is { + issues = append(issues, goanalysis.NewIssue(&is[i], pass)) + } } if len(issues) == 0 { @@ -65,7 +66,7 @@ func NewGofmt() *goanalysis.Linter { return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goimports.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goimports.go index 262b666d7df..90e19b0e85d 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goimports.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goimports.go @@ -10,17 +10,16 @@ import ( "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" ) const goimportsName = "goimports" func NewGoimports() *goanalysis.Linter { var mu sync.Mutex - var resIssues []result.Issue + var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: goimportsName, Doc: goanalysis.TheOnlyanalyzerDoc, } return goanalysis.NewLinter( @@ -37,7 +36,7 @@ func NewGoimports() *goanalysis.Linter { fileNames = append(fileNames, pos.Filename) } - var issues []result.Issue + var issues []goanalysis.Issue for _, f := range fileNames { diff, err := goimportsAPI.Run(f) @@ -53,7 +52,9 @@ func NewGoimports() *goanalysis.Linter { return nil, errors.Wrapf(err, "can't extract issues from gofmt diff output %q", string(diff)) } - issues = append(issues, is...) 
+ for i := range is { + issues = append(issues, goanalysis.NewIssue(&is[i], pass)) + } } if len(issues) == 0 { @@ -66,7 +67,7 @@ func NewGoimports() *goanalysis.Linter { return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/golint.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/golint.go index a77c6f1ba4d..3b1b1b66f1f 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/golint.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/golint.go @@ -4,6 +4,7 @@ import ( "fmt" "go/ast" "go/token" + "go/types" "sync" lintAPI "github.com/golangci/lint-1" @@ -14,9 +15,10 @@ import ( "github.com/golangci/golangci-lint/pkg/result" ) -func golintProcessPkg(minConfidence float64, files []*ast.File, fset *token.FileSet) ([]result.Issue, error) { +func golintProcessPkg(minConfidence float64, files []*ast.File, fset *token.FileSet, + typesPkg *types.Package, typesInfo *types.Info) ([]result.Issue, error) { l := new(lintAPI.Linter) - ps, err := l.LintASTFiles(files, fset) + ps, err := l.LintPkg(files, fset, typesPkg, typesInfo) if err != nil { return nil, fmt.Errorf("can't lint %d files: %s", len(files), err) } @@ -44,10 +46,10 @@ const golintName = "golint" func NewGolint() *goanalysis.Linter { var mu sync.Mutex - var resIssues []result.Issue + var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: golintName, Doc: goanalysis.TheOnlyanalyzerDoc, } return goanalysis.NewLinter( @@ -57,18 +59,20 @@ func NewGolint() *goanalysis.Linter { nil, ).WithContextSetter(func(lintCtx *linter.Context) { analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - res, err := golintProcessPkg(lintCtx.Settings().Golint.MinConfidence, pass.Files, pass.Fset) + res, err := golintProcessPkg(lintCtx.Settings().Golint.MinConfidence, pass.Files, pass.Fset, pass.Pkg, pass.TypesInfo) if err != nil || len(res) == 0 { return nil, err } mu.Lock() - resIssues = append(resIssues, res...) 
+ for i := range res { + resIssues = append(resIssues, goanalysis.NewIssue(&res[i], pass)) + } mu.Unlock() return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues - }).WithLoadMode(goanalysis.LoadModeSyntax) + }).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosec.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosec.go index e3551908cf7..acc0bee7891 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosec.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosec.go @@ -22,14 +22,14 @@ const gosecName = "gosec" func NewGosec() *goanalysis.Linter { var mu sync.Mutex - var resIssues []result.Issue + var resIssues []goanalysis.Issue gasConfig := gosec.NewConfig() enabledRules := rules.Generate() logger := log.New(ioutil.Discard, "", 0) analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: gosecName, Doc: goanalysis.TheOnlyanalyzerDoc, } return goanalysis.NewLinter( @@ -53,7 +53,7 @@ func NewGosec() *goanalysis.Linter { return nil, nil } - res := make([]result.Issue, 0, len(issues)) + res := make([]goanalysis.Issue, 0, len(issues)) for _, i := range issues { text := fmt.Sprintf("%s: %s", i.RuleID, i.What) // TODO: use severity and confidence var r *result.Range @@ -67,7 +67,7 @@ func NewGosec() *goanalysis.Linter { line = r.From } - res = append(res, result.Issue{ + res = append(res, goanalysis.NewIssue(&result.Issue{ //nolint:scopelint Pos: token.Position{ Filename: i.File, Line: line, @@ -75,7 +75,7 @@ func NewGosec() *goanalysis.Linter { Text: text, LineRange: r, FromLinter: gosecName, - }) + }, pass)) } mu.Lock() @@ -84,7 +84,7 @@ func NewGosec() *goanalysis.Linter { return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/ineffassign.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/ineffassign.go index fbc179a7699..7d755809d79 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/ineffassign.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/ineffassign.go @@ -16,10 +16,10 @@ const ineffassignName = "ineffassign" func NewIneffassign() *goanalysis.Linter { var mu sync.Mutex - var resIssues []result.Issue + var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: ineffassignName, Doc: goanalysis.TheOnlyanalyzerDoc, } return goanalysis.NewLinter( @@ -40,13 +40,13 @@ func NewIneffassign() *goanalysis.Linter { return nil, nil } - res := make([]result.Issue, 0, len(issues)) + res := make([]goanalysis.Issue, 0, len(issues)) for _, i := range issues { - res = append(res, result.Issue{ + res = append(res, goanalysis.NewIssue(&result.Issue{ //nolint:scopelint Pos: i.Pos, Text: fmt.Sprintf("ineffectual assignment to %s", formatCode(i.IdentName, lintCtx.Cfg)), FromLinter: ineffassignName, - }) + }, pass)) } mu.Lock() @@ -55,7 +55,7 @@ func NewIneffassign() *goanalysis.Linter { return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git 
a/vendor/github.com/golangci/golangci-lint/pkg/golinters/interfacer.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/interfacer.go index 485d35ee48d..96f6b5b9d46 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/interfacer.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/interfacer.go @@ -16,10 +16,10 @@ const interfacerName = "interfacer" func NewInterfacer() *goanalysis.Linter { var mu sync.Mutex - var resIssues []result.Issue + var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: interfacerName, Doc: goanalysis.TheOnlyanalyzerDoc, Requires: []*analysis.Analyzer{buildssa.Analyzer}, } @@ -45,14 +45,14 @@ func NewInterfacer() *goanalysis.Linter { return nil, nil } - res := make([]result.Issue, 0, len(issues)) + res := make([]goanalysis.Issue, 0, len(issues)) for _, i := range issues { pos := pass.Fset.Position(i.Pos()) - res = append(res, result.Issue{ + res = append(res, goanalysis.NewIssue(&result.Issue{ //nolint:scopelint Pos: pos, Text: i.Message(), FromLinter: interfacerName, - }) + }, pass)) } mu.Lock() @@ -61,7 +61,7 @@ func NewInterfacer() *goanalysis.Linter { return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/lll.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/lll.go index 182de66bcbf..c24b4d14814 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/lll.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/lll.go @@ -77,10 +77,10 @@ const lllName = "lll" func NewLLL() *goanalysis.Linter { var mu sync.Mutex - var resIssues []result.Issue + var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: lllName, Doc: goanalysis.TheOnlyanalyzerDoc, } return goanalysis.NewLinter( @@ -96,14 +96,16 @@ func NewLLL() *goanalysis.Linter { fileNames = append(fileNames, pos.Filename) } - var res []result.Issue + var res []goanalysis.Issue spaces := strings.Repeat(" ", lintCtx.Settings().Lll.TabWidth) for _, f := range fileNames { issues, err := getLLLIssuesForFile(f, lintCtx.Settings().Lll.LineLength, spaces) if err != nil { return nil, err } - res = append(res, issues...) 
+ for i := range issues { + res = append(res, goanalysis.NewIssue(&issues[i], pass)) + } } if len(res) == 0 { @@ -116,7 +118,7 @@ func NewLLL() *goanalysis.Linter { return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/maligned.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/maligned.go index 775ffec8964..4f34f0ea18b 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/maligned.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/maligned.go @@ -15,9 +15,9 @@ import ( func NewMaligned() *goanalysis.Linter { const linterName = "maligned" var mu sync.Mutex - var res []result.Issue + var res []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, } return goanalysis.NewLinter( @@ -34,17 +34,17 @@ func NewMaligned() *goanalysis.Linter { return nil, nil } - issues := make([]result.Issue, 0, len(malignedIssues)) + issues := make([]goanalysis.Issue, 0, len(malignedIssues)) for _, i := range malignedIssues { text := fmt.Sprintf("struct of size %d bytes could be of size %d bytes", i.OldSize, i.NewSize) if lintCtx.Settings().Maligned.SuggestNewOrder { text += fmt.Sprintf(":\n%s", formatCodeBlock(i.NewStructDef, lintCtx.Cfg)) } - issues = append(issues, result.Issue{ + issues = append(issues, goanalysis.NewIssue(&result.Issue{ //nolint:scopelint Pos: i.Pos, Text: text, FromLinter: linterName, - }) + }, pass)) } mu.Lock() @@ -52,7 +52,7 @@ func NewMaligned() *goanalysis.Linter { mu.Unlock() return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return res }).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/misspell.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/misspell.go index bcc9a608b8c..6cd421e5e93 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/misspell.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/misspell.go @@ -56,11 +56,11 @@ const misspellName = "misspell" func NewMisspell() *goanalysis.Linter { var mu sync.Mutex - var resIssues []result.Issue + var resIssues []goanalysis.Issue var ruleErr error analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: misspellName, Doc: goanalysis.TheOnlyanalyzerDoc, } return goanalysis.NewLinter( @@ -106,13 +106,15 @@ func NewMisspell() *goanalysis.Linter { fileNames = append(fileNames, pos.Filename) } - var res []result.Issue + var res []goanalysis.Issue for _, f := range fileNames { issues, err := runMisspellOnFile(f, &r, lintCtx) if err != nil { return nil, err } - res = append(res, issues...) 
+ for i := range issues { + res = append(res, goanalysis.NewIssue(&issues[i], pass)) + } } if len(res) == 0 { return nil, nil @@ -124,7 +126,7 @@ func NewMisspell() *goanalysis.Linter { return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nakedret.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nakedret.go index 2309ca14b06..86735a51ada 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nakedret.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nakedret.go @@ -82,10 +82,10 @@ const nakedretName = "nakedret" func NewNakedret() *goanalysis.Linter { var mu sync.Mutex - var resIssues []result.Issue + var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: nakedretName, Doc: goanalysis.TheOnlyanalyzerDoc, } return goanalysis.NewLinter( @@ -95,14 +95,16 @@ func NewNakedret() *goanalysis.Linter { nil, ).WithContextSetter(func(lintCtx *linter.Context) { analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - var res []result.Issue + var res []goanalysis.Issue for _, file := range pass.Files { v := nakedretVisitor{ maxLength: lintCtx.Settings().Nakedret.MaxFuncLines, f: pass.Fset, } ast.Walk(&v, file) - res = append(res, v.issues...) + for i := range v.issues { + res = append(res, goanalysis.NewIssue(&v.issues[i], pass)) + } } if len(res) == 0 { @@ -115,7 +117,7 @@ func NewNakedret() *goanalysis.Linter { return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/prealloc.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/prealloc.go index 49deaf46a39..168dc1713e9 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/prealloc.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/prealloc.go @@ -16,10 +16,10 @@ const preallocName = "prealloc" func NewPrealloc() *goanalysis.Linter { var mu sync.Mutex - var resIssues []result.Issue + var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: preallocName, Doc: goanalysis.TheOnlyanalyzerDoc, } return goanalysis.NewLinter( @@ -31,14 +31,14 @@ func NewPrealloc() *goanalysis.Linter { s := &lintCtx.Settings().Prealloc analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - var res []result.Issue + var res []goanalysis.Issue hints := prealloc.Check(pass.Files, s.Simple, s.RangeLoops, s.ForLoops) for _, hint := range hints { - res = append(res, result.Issue{ + res = append(res, goanalysis.NewIssue(&result.Issue{ //nolint:scopelint Pos: pass.Fset.Position(hint.Pos), Text: fmt.Sprintf("Consider preallocating %s", formatCode(hint.DeclaredSliceName, lintCtx.Cfg)), FromLinter: preallocName, - }) + }, pass)) } if len(res) == 0 { @@ -51,7 +51,7 @@ func NewPrealloc() *goanalysis.Linter { return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/scopelint.go 
b/vendor/github.com/golangci/golangci-lint/pkg/golinters/scopelint.go index 1544d48a8b7..309ff270802 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/scopelint.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/scopelint.go @@ -17,10 +17,10 @@ const scopelintName = "scopelint" func NewScopelint() *goanalysis.Linter { var mu sync.Mutex - var resIssues []result.Issue + var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: scopelintName, Doc: goanalysis.TheOnlyanalyzerDoc, } return goanalysis.NewLinter( @@ -47,12 +47,14 @@ func NewScopelint() *goanalysis.Linter { } mu.Lock() - resIssues = append(resIssues, res...) + for i := range res { + resIssues = append(resIssues, goanalysis.NewIssue(&res[i], pass)) + } mu.Unlock() return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/structcheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/structcheck.go index a2291683570..8acfc403aa7 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/structcheck.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/structcheck.go @@ -15,9 +15,9 @@ import ( func NewStructcheck() *goanalysis.Linter { const linterName = "structcheck" var mu sync.Mutex - var res []result.Issue + var res []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, } return goanalysis.NewLinter( @@ -35,13 +35,13 @@ func NewStructcheck() *goanalysis.Linter { return nil, nil } - issues := make([]result.Issue, 0, len(structcheckIssues)) + issues := make([]goanalysis.Issue, 0, len(structcheckIssues)) for _, i := range structcheckIssues { - issues = append(issues, result.Issue{ + issues = append(issues, goanalysis.NewIssue(&result.Issue{ //nolint:scopelint Pos: i.Pos, Text: fmt.Sprintf("%s is unused", formatCode(i.FieldName, lintCtx.Cfg)), FromLinter: linterName, - }) + }, pass)) } mu.Lock() @@ -49,7 +49,7 @@ func NewStructcheck() *goanalysis.Linter { mu.Unlock() return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return res }).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/typecheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/typecheck.go index ece8e9acb16..436530a8dbe 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/typecheck.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/typecheck.go @@ -9,7 +9,7 @@ import ( func NewTypecheck() *goanalysis.Linter { const linterName = "typecheck" analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, Run: func(pass *analysis.Pass) (interface{}, error) { return nil, nil diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/unconvert.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/unconvert.go index 04b69d3268d..147570170e2 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/unconvert.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/unconvert.go @@ -14,9 +14,9 @@ import ( func NewUnconvert() 
*goanalysis.Linter { const linterName = "unconvert" var mu sync.Mutex - var res []result.Issue + var res []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, } return goanalysis.NewLinter( @@ -33,13 +33,13 @@ func NewUnconvert() *goanalysis.Linter { return nil, nil } - issues := make([]result.Issue, 0, len(positions)) + issues := make([]goanalysis.Issue, 0, len(positions)) for _, pos := range positions { - issues = append(issues, result.Issue{ + issues = append(issues, goanalysis.NewIssue(&result.Issue{ //nolint:scopelint Pos: pos, Text: "unnecessary conversion", FromLinter: linterName, - }) + }, pass)) } mu.Lock() @@ -47,7 +47,7 @@ func NewUnconvert() *goanalysis.Linter { mu.Unlock() return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return res }).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/unparam.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/unparam.go index d89168edac7..866d0663e63 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/unparam.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/unparam.go @@ -17,10 +17,10 @@ import ( func NewUnparam() *goanalysis.Linter { const linterName = "unparam" var mu sync.Mutex - var resIssues []result.Issue + var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, Requires: []*analysis.Analyzer{buildssa.Analyzer}, } @@ -56,13 +56,13 @@ func NewUnparam() *goanalysis.Linter { return nil, err } - var res []result.Issue + var res []goanalysis.Issue for _, i := range unparamIssues { - res = append(res, result.Issue{ + res = append(res, goanalysis.NewIssue(&result.Issue{ //nolint:scopelint Pos: pass.Fset.Position(i.Pos()), Text: i.Message(), FromLinter: linterName, - }) + }, pass)) } mu.Lock() @@ -71,7 +71,7 @@ func NewUnparam() *goanalysis.Linter { return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/unused.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/unused.go index ab10a286a1b..8828ce9ba5f 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/unused.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/unused.go @@ -1,7 +1,10 @@ package golinters import ( + "go/types" + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/packages" "honnef.co/go/tools/unused" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" @@ -20,15 +23,22 @@ func NewUnused() *goanalysis.Linter { "Checks Go code for unused constants, variables, functions and types", analyzers, nil, - ).WithIssuesReporter(func(lintCtx *linter.Context) []result.Issue { - var issues []result.Issue + ).WithIssuesReporter(func(lintCtx *linter.Context) []goanalysis.Issue { + typesToPkg := map[*types.Package]*packages.Package{} + for _, pkg := range lintCtx.OriginalPackages { + typesToPkg[pkg.Types] = pkg + } + + var issues []goanalysis.Issue for _, ur := range u.Result() { p := u.ProblemObject(lintCtx.Packages[0].Fset, ur) - issues = append(issues, result.Issue{ + pkg := typesToPkg[ur.Pkg()] + issues 
= append(issues, goanalysis.NewIssue(&result.Issue{ //nolint:scopelint FromLinter: name, Text: p.Message, Pos: p.Pos, - }) + Pkg: pkg, + }, nil)) } return issues }).WithContextSetter(func(lintCtx *linter.Context) { diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/varcheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/varcheck.go index e0ff0d67f7c..3c650d8c9d3 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/varcheck.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/varcheck.go @@ -15,9 +15,9 @@ import ( func NewVarcheck() *goanalysis.Linter { const linterName = "varcheck" var mu sync.Mutex - var res []result.Issue + var res []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, } return goanalysis.NewLinter( @@ -35,13 +35,13 @@ func NewVarcheck() *goanalysis.Linter { return nil, nil } - issues := make([]result.Issue, 0, len(varcheckIssues)) + issues := make([]goanalysis.Issue, 0, len(varcheckIssues)) for _, i := range varcheckIssues { - issues = append(issues, result.Issue{ + issues = append(issues, goanalysis.NewIssue(&result.Issue{ //nolint:scopelint Pos: i.Pos, Text: fmt.Sprintf("%s is unused", formatCode(i.VarName, lintCtx.Cfg)), FromLinter: linterName, - }) + }, pass)) } mu.Lock() @@ -49,7 +49,7 @@ func NewVarcheck() *goanalysis.Linter { mu.Unlock() return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return res }).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/whitespace.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/whitespace.go index 5064cef2b4f..4a2ccce5d64 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/whitespace.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/whitespace.go @@ -16,10 +16,10 @@ import ( func NewWhitespace() *goanalysis.Linter { const linterName = "whitespace" var mu sync.Mutex - var resIssues []result.Issue + var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, } return goanalysis.NewLinter( @@ -41,7 +41,7 @@ func NewWhitespace() *goanalysis.Linter { return nil, nil } - res := make([]result.Issue, len(issues)) + res := make([]goanalysis.Issue, len(issues)) for k, i := range issues { issue := result.Issue{ Pos: token.Position{ @@ -70,7 +70,7 @@ func NewWhitespace() *goanalysis.Linter { } issue.Replacement.NewLines = []string{bracketLine} - res[k] = issue + res[k] = goanalysis.NewIssue(&issue, pass) //nolint:scopelint } mu.Lock() @@ -79,7 +79,7 @@ func NewWhitespace() *goanalysis.Linter { return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/wsl.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/wsl.go index 4dcbf55e8b0..174a673baec 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/wsl.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/wsl.go @@ -18,7 +18,7 @@ const ( // NewWSL returns a new WSL linter. 
func NewWSL() *goanalysis.Linter { var ( - issues []result.Issue + issues []goanalysis.Issue mu = sync.Mutex{} analyzer = &analysis.Analyzer{ Name: goanalysis.TheOnlyAnalyzerName, @@ -37,11 +37,13 @@ func NewWSL() *goanalysis.Linter { files = []string{} linterCfg = lintCtx.Cfg.LintersSettings.WSL processorCfg = wsl.Configuration{ - StrictAppend: linterCfg.StrictAppend, - AllowAssignAndCallCuddle: linterCfg.AllowAssignAndCallCuddle, - AllowMultiLineAssignCuddle: linterCfg.AllowMultiLineAssignCuddle, - AllowCuddleWithCalls: []string{"Lock", "RLock"}, - AllowCuddleWithRHS: []string{"Unlock", "RUnlock"}, + StrictAppend: linterCfg.StrictAppend, + AllowAssignAndCallCuddle: linterCfg.AllowAssignAndCallCuddle, + AllowMultiLineAssignCuddle: linterCfg.AllowMultiLineAssignCuddle, + AllowCaseTrailingWhitespace: linterCfg.AllowCaseTrailingWhitespace, + AllowCuddleDeclaration: linterCfg.AllowCuddleDeclaration, + AllowCuddleWithCalls: []string{"Lock", "RLock"}, + AllowCuddleWithRHS: []string{"Unlock", "RUnlock"}, } ) @@ -60,16 +62,16 @@ func NewWSL() *goanalysis.Linter { defer mu.Unlock() for _, err := range wslErrors { - issues = append(issues, result.Issue{ + issues = append(issues, goanalysis.NewIssue(&result.Issue{ //nolint:scopelint FromLinter: name, Pos: err.Position, Text: err.Reason, - }) + }, pass)) } return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return issues }).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/manager.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/manager.go index 700552c83e1..f2cd2bd57ce 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/manager.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/manager.go @@ -76,6 +76,7 @@ func (m Manager) GetAllSupportedLinterConfigs() []*linter.Config { WithPresets(linter.PresetBugs). WithURL("https://github.com/kisielk/errcheck"), linter.NewConfig(golinters.NewGolint()). + WithLoadForGoAnalysis(). WithPresets(linter.PresetStyle). WithURL("https://github.com/golang/lint"), diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/runner.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/runner.go index fb7324ee071..62c97914e19 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/lint/runner.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/runner.go @@ -112,13 +112,16 @@ func (r *Runner) runLinterSafe(ctx context.Context, lintCtx *linter.Context, specificLintCtx := *lintCtx specificLintCtx.Log = r.Log.Child(lc.Name()) + issues, err := lc.Linter.Run(ctx, &specificLintCtx) if err != nil { return nil, err } - for _, i := range issues { - i.FromLinter = lc.Name() + for i := range issues { + if issues[i].FromLinter == "" { + issues[i].FromLinter = lc.Name() + } } return issues, nil diff --git a/vendor/github.com/golangci/golangci-lint/pkg/logutils/log.go b/vendor/github.com/golangci/golangci-lint/pkg/logutils/log.go index 070887ccb78..b955417a87a 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/logutils/log.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/logutils/log.go @@ -14,18 +14,18 @@ type Log interface { type LogLevel int const ( - // debug message, write to debug logs only by logutils.Debug + // Debug messages, write to debug logs only by logutils.Debug. 
LogLevelDebug LogLevel = 0 - // information messages, don't write too much messages, - // only useful ones: they are shown when running with -v + // Information messages, don't write too much messages, + // only useful ones: they are shown when running with -v. LogLevelInfo LogLevel = 1 - // hidden errors: non critical errors: work can be continued, no need to fail whole program; + // Hidden errors: non critical errors: work can be continued, no need to fail whole program; // tests will crash if any warning occurred. LogLevelWarn LogLevel = 2 - // only not hidden from user errors: whole program failing, usually + // Only not hidden from user errors: whole program failing, usually // error logging happens in 1-2 places: in the "main" function. LogLevelError LogLevel = 3 ) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/checkstyle.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/checkstyle.go index ca7a6ebc0a6..f36bc108adc 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/printers/checkstyle.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/checkstyle.go @@ -43,7 +43,8 @@ func (Checkstyle) Print(ctx context.Context, issues []result.Issue) error { files := map[string]*checkstyleFile{} - for _, issue := range issues { + for i := range issues { + issue := &issues[i] file, ok := files[issue.FilePath()] if !ok { file = &checkstyleFile{ diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/codeclimate.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/codeclimate.go index 8e184464b62..26878056884 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/printers/codeclimate.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/codeclimate.go @@ -32,7 +32,8 @@ func NewCodeClimate() *CodeClimate { func (p CodeClimate) Print(ctx context.Context, issues []result.Issue) error { allIssues := []CodeClimateIssue{} - for _, i := range issues { + for ind := range issues { + i := &issues[ind] var issue CodeClimateIssue issue.Description = i.FromLinter + ": " + i.Text issue.Location.Path = i.Pos.Filename diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/junitxml.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/junitxml.go index 46b14ef0779..b3d4280961c 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/printers/junitxml.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/junitxml.go @@ -41,7 +41,8 @@ func NewJunitXML() *JunitXML { func (JunitXML) Print(ctx context.Context, issues []result.Issue) error { suites := make(map[string]testSuiteXML) // use a map to group by file - for _, i := range issues { + for ind := range issues { + i := &issues[ind] suiteName := i.FilePath() testSuite := suites[suiteName] testSuite.Suite = i.FilePath() diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/tab.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/tab.go index 8d9974c8e03..d3cdce673dd 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/printers/tab.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/tab.go @@ -32,9 +32,8 @@ func (p Tab) SprintfColored(ca color.Attribute, format string, args ...interface func (p *Tab) Print(ctx context.Context, issues []result.Issue) error { w := tabwriter.NewWriter(logutils.StdOut, 0, 0, 2, ' ', 0) - for _, i := range issues { - i := i - p.printIssue(&i, w) + for i := range issues { + p.printIssue(&issues[i], w) } if err := w.Flush(); err != nil { diff --git 
a/vendor/github.com/golangci/golangci-lint/pkg/printers/text.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/text.go index 772b58da399..28349920533 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/printers/text.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/text.go @@ -37,16 +37,15 @@ func (p Text) SprintfColored(ca color.Attribute, format string, args ...interfac } func (p *Text) Print(ctx context.Context, issues []result.Issue) error { - for _, i := range issues { - i := i - p.printIssue(&i) + for i := range issues { + p.printIssue(&issues[i]) if !p.printIssuedLine { continue } - p.printSourceCode(&i) - p.printUnderLinePointer(&i) + p.printSourceCode(&issues[i]) + p.printUnderLinePointer(&issues[i]) } return nil diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/issue.go b/vendor/github.com/golangci/golangci-lint/pkg/result/issue.go index 7b1df59dd02..83ba705edfc 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/issue.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/issue.go @@ -2,6 +2,8 @@ package result import ( "go/token" + + "golang.org/x/tools/go/packages" ) type Range struct { @@ -23,18 +25,22 @@ type InlineFix struct { type Issue struct { FromLinter string Text string - Pos token.Position - - LineRange *Range `json:",omitempty"` - - // HunkPos is used only when golangci-lint is run over a diff - HunkPos int `json:",omitempty"` // Source lines of a code with the issue to show SourceLines []string // If we know how to fix the issue we can provide replacement lines Replacement *Replacement + + // Pkg is needed for proper caching of linting results + Pkg *packages.Package `json:"-"` + + LineRange *Range `json:",omitempty"` + + Pos token.Position + + // HunkPos is used only when golangci-lint is run over a diff + HunkPos int `json:",omitempty"` } func (i *Issue) FilePath() string { diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/fixer.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/fixer.go index 0ca027b8495..401c68dad12 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/fixer.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/fixer.go @@ -44,13 +44,14 @@ func (f Fixer) Process(issues []result.Issue) []result.Issue { outIssues := make([]result.Issue, 0, len(issues)) issuesToFixPerFile := map[string][]result.Issue{} - for _, issue := range issues { + for i := range issues { + issue := &issues[i] if issue.Replacement == nil { - outIssues = append(outIssues, issue) + outIssues = append(outIssues, *issue) continue } - issuesToFixPerFile[issue.FilePath()] = append(issuesToFixPerFile[issue.FilePath()], issue) + issuesToFixPerFile[issue.FilePath()] = append(issuesToFixPerFile[issue.FilePath()], *issue) } for file, issuesToFix := range issuesToFixPerFile { @@ -87,8 +88,9 @@ func (f Fixer) fixIssuesInFile(filePath string, issues []result.Issue) error { // merge multiple issues per line into one issue issuesPerLine := map[int][]result.Issue{} - for _, i := range issues { - issuesPerLine[i.Line()] = append(issuesPerLine[i.Line()], i) + for i := range issues { + issue := &issues[i] + issuesPerLine[issue.Line()] = append(issuesPerLine[issue.Line()], *issue) } issues = issues[:0] // reuse the same memory @@ -123,7 +125,8 @@ func (f Fixer) mergeLineIssues(lineNum int, lineIssues []result.Issue, origFileL } // check issues first - for _, i := range lineIssues { + for ind := range lineIssues { + i 
:= &lineIssues[ind] if i.LineRange != nil { f.log.Infof("Line %d has multiple issues but at least one of them is ranged: %#v", lineNum, lineIssues) return &lineIssues[0] @@ -156,8 +159,8 @@ func (f Fixer) applyInlineFixes(lineIssues []result.Issue, origLine []byte, line // example: origLine="it's becouse of them", StartCol=5, Length=7, NewString="because" curOrigLinePos := 0 - for _, i := range lineIssues { - fix := i.Replacement.Inline + for i := range lineIssues { + fix := lineIssues[i].Replacement.Inline if fix.StartCol < curOrigLinePos { f.log.Warnf("Line %d has multiple intersecting issues: %#v", lineNum, lineIssues) return nil @@ -188,14 +191,15 @@ func (f Fixer) findNotIntersectingIssues(issues []result.Issue) []result.Issue { var ret []result.Issue var currentEnd int - for _, issue := range issues { + for i := range issues { + issue := &issues[i] rng := issue.GetLineRange() if rng.From <= currentEnd { f.log.Infof("Skip issue %#v: intersects with end %d", issue, currentEnd) continue // skip intersecting issue } f.log.Infof("Fix issue %#v with range %v", issue, issue.GetLineRange()) - ret = append(ret, issue) + ret = append(ret, *issue) currentEnd = rng.To } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/utils.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/utils.go index 9d8ff9343dc..8bc3d847d65 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/utils.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/utils.go @@ -1,17 +1,16 @@ package processors import ( - "fmt" + "github.com/pkg/errors" "github.com/golangci/golangci-lint/pkg/result" ) func filterIssues(issues []result.Issue, filter func(i *result.Issue) bool) []result.Issue { retIssues := make([]result.Issue, 0, len(issues)) - for _, i := range issues { - i := i - if filter(&i) { - retIssues = append(retIssues, i) + for i := range issues { + if filter(&issues[i]) { + retIssues = append(retIssues, issues[i]) } } @@ -20,15 +19,14 @@ func filterIssues(issues []result.Issue, filter func(i *result.Issue) bool) []re func filterIssuesErr(issues []result.Issue, filter func(i *result.Issue) (bool, error)) ([]result.Issue, error) { retIssues := make([]result.Issue, 0, len(issues)) - for _, i := range issues { - i := i - ok, err := filter(&i) + for i := range issues { + ok, err := filter(&issues[i]) if err != nil { - return nil, fmt.Errorf("can't filter issue %#v: %s", i, err) + return nil, errors.Wrapf(err, "can't filter issue %#v", issues[i]) } if ok { - retIssues = append(retIssues, i) + retIssues = append(retIssues, issues[i]) } } @@ -37,9 +35,8 @@ func filterIssuesErr(issues []result.Issue, filter func(i *result.Issue) (bool, func transformIssues(issues []result.Issue, transform func(i *result.Issue) *result.Issue) []result.Issue { retIssues := make([]result.Issue, 0, len(issues)) - for _, i := range issues { - i := i - newI := transform(&i) + for i := range issues { + newI := transform(&issues[i]) if newI != nil { retIssues = append(retIssues, *newI) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/timeutils/stopwatch.go b/vendor/github.com/golangci/golangci-lint/pkg/timeutils/stopwatch.go index 78d1e3cdb0f..9628bd80f2b 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/timeutils/stopwatch.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/timeutils/stopwatch.go @@ -10,6 +10,8 @@ import ( "github.com/golangci/golangci-lint/pkg/logutils" ) +const noStagesText = "no stages" + type Stopwatch 
struct { name string startedAt time.Time @@ -33,11 +35,7 @@ type stageDuration struct { d time.Duration } -func (s *Stopwatch) sprintStages() string { - if len(s.stages) == 0 { - return "no stages" - } - +func (s *Stopwatch) stageDurationsSorted() []stageDuration { stageDurations := []stageDuration{} for n, d := range s.stages { stageDurations = append(stageDurations, stageDuration{ @@ -48,6 +46,16 @@ func (s *Stopwatch) sprintStages() string { sort.Slice(stageDurations, func(i, j int) bool { return stageDurations[i].d > stageDurations[j].d }) + return stageDurations +} + +func (s *Stopwatch) sprintStages() string { + if len(s.stages) == 0 { + return noStagesText + } + + stageDurations := s.stageDurationsSorted() + stagesStrings := []string{} for _, s := range stageDurations { stagesStrings = append(stagesStrings, fmt.Sprintf("%s: %s", s.name, s.d)) @@ -56,6 +64,22 @@ func (s *Stopwatch) sprintStages() string { return fmt.Sprintf("stages: %s", strings.Join(stagesStrings, ", ")) } +func (s *Stopwatch) sprintTopStages(n int) string { + if len(s.stages) == 0 { + return noStagesText + } + + stageDurations := s.stageDurationsSorted() + + stagesStrings := []string{} + for i := 0; i < len(stageDurations) && i < n; i++ { + s := stageDurations[i] + stagesStrings = append(stagesStrings, fmt.Sprintf("%s: %s", s.name, s.d)) + } + + return fmt.Sprintf("top %d stages: %s", n, strings.Join(stagesStrings, ", ")) +} + func (s *Stopwatch) Print() { p := fmt.Sprintf("%s took %s", s.name, time.Since(s.startedAt)) if len(s.stages) == 0 { @@ -74,6 +98,14 @@ func (s *Stopwatch) PrintStages() { s.log.Infof("%s took %s with %s", s.name, stagesDuration, s.sprintStages()) } +func (s *Stopwatch) PrintTopStages(n int) { + var stagesDuration time.Duration + for _, s := range s.stages { + stagesDuration += s + } + s.log.Infof("%s took %s with %s", s.name, stagesDuration, s.sprintTopStages(n)) +} + func (s *Stopwatch) TrackStage(name string, f func()) { startedAt := time.Now() f() diff --git a/vendor/github.com/golangci/lint-1/lint.go b/vendor/github.com/golangci/lint-1/lint.go index de63631a38d..886c85bf099 100644 --- a/vendor/github.com/golangci/lint-1/lint.go +++ b/vendor/github.com/golangci/lint-1/lint.go @@ -118,10 +118,12 @@ func (l *Linter) LintFiles(files map[string][]byte) ([]Problem, error) { // LintFiles lints a set of files of a single package. // The argument is a map of filename to source. -func (l *Linter) LintASTFiles(files []*ast.File, fset *token.FileSet) ([]Problem, error) { +func (l *Linter) LintPkg(files []*ast.File, fset *token.FileSet, typesPkg *types.Package, typesInfo *types.Info) ([]Problem, error) { pkg := &pkg{ - fset: fset, - files: make(map[string]*file), + fset: fset, + files: make(map[string]*file), + typesPkg: typesPkg, + typesInfo: typesInfo, } var pkgName string for _, f := range files { @@ -193,25 +195,6 @@ type pkg struct { } func (p *pkg) lint() []Problem { - if err := p.typeCheck(); err != nil { - /* TODO(dsymonds): Consider reporting these errors when golint operates on entire packages. - if e, ok := err.(types.Error); ok { - pos := p.fset.Position(e.Pos) - conf := 1.0 - if strings.Contains(e.Msg, "can't find import: ") { - // Golint is probably being run in a context that doesn't support - // typechecking (e.g. package files aren't found), so don't warn about it. - conf = 0 - } - if conf > 0 { - p.errorfAt(pos, conf, category("typechecking"), e.Msg) - } - - // TODO(dsymonds): Abort if !e.Soft? 
- } - */ - } - p.scanSortable() p.main = p.isMain() diff --git a/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go b/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go index d41c4e97e32..6e7a76e8c8b 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go @@ -79,7 +79,6 @@ var ( asmArchArm = asmArch{name: "arm", bigEndian: false, stack: "R13", lr: true} asmArchArm64 = asmArch{name: "arm64", bigEndian: false, stack: "RSP", lr: true} asmArchAmd64 = asmArch{name: "amd64", bigEndian: false, stack: "SP", lr: false} - asmArchAmd64p32 = asmArch{name: "amd64p32", bigEndian: false, stack: "SP", lr: false} asmArchMips = asmArch{name: "mips", bigEndian: true, stack: "R29", lr: true} asmArchMipsLE = asmArch{name: "mipsle", bigEndian: false, stack: "R29", lr: true} asmArchMips64 = asmArch{name: "mips64", bigEndian: true, stack: "R29", lr: true} @@ -94,7 +93,6 @@ var ( &asmArchArm, &asmArchArm64, &asmArchAmd64, - &asmArchAmd64p32, &asmArchMips, &asmArchMipsLE, &asmArchMips64, @@ -635,9 +633,6 @@ func asmCheckVar(badf func(string, ...interface{}), fn *asmFunc, line, expr stri case "amd64.LEAQ": dst = 8 addr = true - case "amd64p32.LEAL": - dst = 4 - addr = true default: switch fn.arch.name { case "386", "amd64": diff --git a/vendor/golang.org/x/tools/go/analysis/passes/findcall/findcall.go b/vendor/golang.org/x/tools/go/analysis/passes/findcall/findcall.go index 2a4e3a47449..cd31ab416ff 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/findcall/findcall.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/findcall/findcall.go @@ -5,7 +5,10 @@ // The findcall package defines an Analyzer that serves as a trivial // example and test of the Analysis API. It reports a diagnostic for // every call to a function or method of the name specified by its -// -name flag. +// -name flag. It also exports a fact for each declaration that +// matches the name, plus a package-level fact if the package contained +// one or more such declarations. + package findcall import ( @@ -69,6 +72,10 @@ func run(pass *analysis.Pass) (interface{}, error) { } } + if len(pass.AllObjectFacts()) > 0 { + pass.ExportPackageFact(new(foundFact)) + } + return nil, nil } diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go index 98b3987b974..f8363d8faae 100644 --- a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go +++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go @@ -100,7 +100,7 @@ func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, // Write writes encoded type information for the specified package to out. // The FileSet provides file position information for named objects. func Write(out io.Writer, fset *token.FileSet, pkg *types.Package) error { - b, err := gcimporter.BExportData(fset, pkg) + b, err := gcimporter.IExportData(fset, pkg) if err != nil { return err } diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go index be671c79b70..5f00153f896 100644 --- a/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go @@ -6,8 +6,6 @@ // This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go; // see that file for specification of the format. 
-// +build go1.11 - package gcimporter import ( @@ -267,6 +265,11 @@ func (w *exportWriter) tag(tag byte) { } func (w *exportWriter) pos(pos token.Pos) { + if w.p.fset == nil { + w.int64(0) + return + } + p := w.p.fset.Position(pos) file := p.Filename line := int64(p.Line) @@ -394,7 +397,7 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { w.pos(f.Pos()) w.string(f.Name()) w.typ(f.Type(), pkg) - w.bool(f.Embedded()) + w.bool(f.Anonymous()) w.string(t.Tag(i)) // note (or tag) } diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go index a9c5733eba0..3af95f4a16a 100644 --- a/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go @@ -143,12 +143,20 @@ func IImportData(fset *token.FileSet, imports map[string]*types.Package, data [] p.pkgIndex[pkg] = nameIndex pkgList[i] = pkg } + if len(pkgList) == 0 { + errorf("no packages found for %s", path) + panic("unreachable") + } var localpkg *types.Package for _, pkg := range pkgList { if pkg.Path() == path { localpkg = pkg + break } } + if localpkg == nil { + localpkg = pkgList[0] + } names := make([]string, 0, len(p.pkgIndex[localpkg])) for name := range p.pkgIndex[localpkg] { diff --git a/vendor/golang.org/x/tools/go/ssa/func.go b/vendor/golang.org/x/tools/go/ssa/func.go index b21ff4e521e..0b99bc9ba16 100644 --- a/vendor/golang.org/x/tools/go/ssa/func.go +++ b/vendor/golang.org/x/tools/go/ssa/func.go @@ -257,6 +257,10 @@ func (f *Function) createSyntacticParams(recv *ast.FieldList, functype *ast.Func } } +type setNumable interface { + setNum(int) +} + // numberRegisters assigns numbers to all SSA registers // (value-defining Instructions) in f, to aid debugging. // (Non-Instruction Values are named at construction.) 
@@ -267,9 +271,7 @@ func numberRegisters(f *Function) { for _, instr := range b.Instrs { switch instr.(type) { case Value: - instr.(interface { - setNum(int) - }).setNum(v) + instr.(setNumable).setNum(v) v++ } } diff --git a/vendor/modules.txt b/vendor/modules.txt index 0cd5db71551..73f8224c22c 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -234,7 +234,7 @@ github.com/bflad/tfproviderlint/passes/schemaschema github.com/bgentry/go-netrc/netrc # github.com/bgentry/speakeasy v0.1.0 github.com/bgentry/speakeasy -# github.com/bombsimon/wsl v1.2.1 +# github.com/bombsimon/wsl v1.2.5 github.com/bombsimon/wsl # github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc github.com/boombuler/barcode @@ -278,6 +278,8 @@ github.com/gobwas/glob/syntax/ast github.com/gobwas/glob/syntax/lexer github.com/gobwas/glob/util/runes github.com/gobwas/glob/util/strings +# github.com/gofrs/flock v0.0.0-20190320160742-5135e617513b +github.com/gofrs/flock # github.com/gogo/protobuf v1.2.1 github.com/gogo/protobuf/proto github.com/gogo/protobuf/sortkeys @@ -312,7 +314,7 @@ github.com/golangci/gocyclo/pkg/gocyclo # github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a github.com/golangci/gofmt/gofmt github.com/golangci/gofmt/goimports -# github.com/golangci/golangci-lint v1.20.0 +# github.com/golangci/golangci-lint v1.21.0 github.com/golangci/golangci-lint/cmd/golangci-lint github.com/golangci/golangci-lint/internal/cache github.com/golangci/golangci-lint/internal/errorutil @@ -339,7 +341,7 @@ github.com/golangci/golangci-lint/pkg/result/processors github.com/golangci/golangci-lint/pkg/timeutils # github.com/golangci/ineffassign v0.0.0-20190609212857-42439a7714cc github.com/golangci/ineffassign -# github.com/golangci/lint-1 v0.0.0-20190930103755-fad67e08aa89 +# github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 github.com/golangci/lint-1 # github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca github.com/golangci/maligned @@ -663,7 +665,7 @@ golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm golang.org/x/text/width -# golang.org/x/tools v0.0.0-20190930201159-7c411dea38b0 +# golang.org/x/tools v0.0.0-20191010075000-0337d82405ff golang.org/x/tools/go/analysis golang.org/x/tools/go/analysis/internal/analysisflags golang.org/x/tools/go/analysis/internal/checker From 9e70755a8f648511f50d7401d1002ea399516b22 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Mon, 28 Oct 2019 12:27:06 -0400 Subject: [PATCH 36/55] provider: Add missing go.sum entries from renovate update of github.com/golangci/golangci-lint@v1.21.0 Reference: https://github.com/renovatebot/renovate/issues/3017 --- go.sum | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/go.sum b/go.sum index d9348733bfd..c046a9a6f88 100644 --- a/go.sum +++ b/go.sum @@ -48,6 +48,7 @@ github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1U github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bombsimon/wsl v1.2.5 h1:9gTOkIwVtoDZywvX802SDHokeX4kW1cKnV8ZTVAPkRs= github.com/bombsimon/wsl v1.2.5/go.mod h1:43lEF/i0kpXbLCeDXL9LMT8c92HyBywXb0AsgMHYngM= github.com/boombuler/barcode 
v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8WK8raXaxBx6fRVTlJILwEwQGL1I/ByEI= github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= @@ -116,6 +117,7 @@ github.com/go-toolsmith/typep v1.0.0 h1:zKymWyA1TRYvqYrYDrfEMZULyrhcnGY3x7LDKU2X github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/gofrs/flock v0.0.0-20190320160742-5135e617513b h1:ekuhfTjngPhisSjOJ0QWKpPQE8/rbknHaes6WVJj5Hw= github.com/gofrs/flock v0.0.0-20190320160742-5135e617513b/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= @@ -152,9 +154,11 @@ github.com/golangci/gocyclo v0.0.0-20180528134321-2becd97e67ee h1:J2XAy40+7yz70u github.com/golangci/gocyclo v0.0.0-20180528134321-2becd97e67ee/go.mod h1:ozx7R9SIwqmqf5pRP90DhR2Oay2UIjGuKheCBCNwAYU= github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a h1:iR3fYXUjHCR97qWS8ch1y9zPNsgXThGwjKPrYfqMPks= github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= +github.com/golangci/golangci-lint v1.21.0 h1:HxAxpR8Z0M8omihvQdsD3PF0qPjlqYqp2vMJzstoKeI= github.com/golangci/golangci-lint v1.21.0/go.mod h1:phxpHK52q7SE+5KpPnti4oZTdFCEsn/tKN+nFvCKXfk= github.com/golangci/ineffassign v0.0.0-20190609212857-42439a7714cc h1:gLLhTLMk2/SutryVJ6D4VZCU3CUqr8YloG7FPIBWFpI= github.com/golangci/ineffassign v0.0.0-20190609212857-42439a7714cc/go.mod h1:e5tpTHCfVze+7EpLEozzMB3eafxo2KT5veNg1k6byQU= +github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA= github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA= github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= From 9214aeb5db7cdd342c98500c035ad3f9ca7430bf Mon Sep 17 00:00:00 2001 From: Anthony Nowell Date: Mon, 28 Oct 2019 13:11:28 -0700 Subject: [PATCH 37/55] S3: support MethodNotAllowed for Object Lock Configuration Some environments do not support Bucket Object Lock Configuration which was causing errors when reading the bucket configuration. This change allows us to ignore Object Lock Configuration if the endpoint responds with MethodNotAllowed. 
Resolves #10153
---
 aws/resource_aws_s3_bucket.go | 5 +++++
 1 file changed, 5 insertions(+)
diff --git a/aws/resource_aws_s3_bucket.go b/aws/resource_aws_s3_bucket.go
index 1ba3f64ea8f..02002eb4e40 100644
--- a/aws/resource_aws_s3_bucket.go
+++ b/aws/resource_aws_s3_bucket.go
@@ -2452,6 +2452,11 @@ func readS3ObjectLockConfiguration(conn *s3.S3, bucket string) ([]interface{}, e
 })
 })
 if err != nil {
+ // Certain S3 implementations do not include this API
+ if isAWSErr(err, "MethodNotAllowed", "") {
+ return nil, nil
+ }
+
 if isAWSErr(err, "ObjectLockConfigurationNotFoundError", "") {
 return nil, nil
 }

From 29c9b5e27f0b87bcd66933bfa932f7029e9e4e0d Mon Sep 17 00:00:00 2001
From: Kit Ewbank
Date: Mon, 28 Oct 2019 18:31:11 -0400
Subject: [PATCH 38/55] Use StateUpgraders instead of MigrateState for Terraform 0.12 compatibility.

---
 aws/resource_aws_dx_gateway_association.go | 8 +-
 ...urce_aws_dx_gateway_association_migrate.go | 86 ++++++++++++++++---
 2 files changed, 79 insertions(+), 15 deletions(-)
diff --git a/aws/resource_aws_dx_gateway_association.go b/aws/resource_aws_dx_gateway_association.go
index 84679da47a8..3c724d4d9b1 100644
--- a/aws/resource_aws_dx_gateway_association.go
+++ b/aws/resource_aws_dx_gateway_association.go
@@ -28,7 +28,13 @@ func resourceAwsDxGatewayAssociation() *schema.Resource {
 },
 SchemaVersion: 1,
- MigrateState: resourceAwsDxGatewayAssociationMigrateState,
+ StateUpgraders: []schema.StateUpgrader{
+ {
+ Type: resourceAwsDxGatewayAssociationResourceV0().CoreConfigSchema().ImpliedType(),
+ Upgrade: resourceAwsDxGatewayAssociationStateUpgradeV0,
+ Version: 0,
+ },
+ },
 Schema: map[string]*schema.Schema{
 "allowed_prefixes": {
diff --git a/aws/resource_aws_dx_gateway_association_migrate.go b/aws/resource_aws_dx_gateway_association_migrate.go
index 80c9c12106a..b5555b5297d 100644
--- a/aws/resource_aws_dx_gateway_association_migrate.go
+++ b/aws/resource_aws_dx_gateway_association_migrate.go
@@ -6,27 +6,85 @@ import (
 "github.com/aws/aws-sdk-go/aws"
 "github.com/aws/aws-sdk-go/service/directconnect"
- "github.com/hashicorp/terraform-plugin-sdk/terraform"
+ "github.com/hashicorp/terraform-plugin-sdk/helper/schema"
 )
-func resourceAwsDxGatewayAssociationMigrateState(v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) {
- switch v {
- case 0:
- log.Println("[INFO] Found Direct Connect gateway association state v0; migrating to v1")
- return migrateDxGatewayAssociationStateV0toV1(is, meta)
- default:
- return is, fmt.Errorf("Unexpected schema version: %d", v)
+func resourceAwsDxGatewayAssociationResourceV0() *schema.Resource {
+ return &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "allowed_prefixes": {
+ Type: schema.TypeSet,
+ Optional: true,
+ Computed: true,
+ Elem: &schema.Schema{Type: schema.TypeString},
+ },
+
+ "associated_gateway_id": {
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ ForceNew: true,
+ ConflictsWith: []string{"associated_gateway_owner_account_id", "proposal_id", "vpn_gateway_id"},
+ },
+
+ "associated_gateway_owner_account_id": {
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ ForceNew: true,
+ ValidateFunc: validateAwsAccountId,
+ ConflictsWith: []string{"associated_gateway_id", "vpn_gateway_id"},
+ },
+
+ "associated_gateway_type": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+
+ "dx_gateway_association_id": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+
+ "dx_gateway_id": {
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ + "dx_gateway_owner_account_id": { + Type: schema.TypeString, + Computed: true, + }, + + "proposal_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"associated_gateway_id", "vpn_gateway_id"}, + }, + + "vpn_gateway_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"associated_gateway_id", "associated_gateway_owner_account_id", "proposal_id"}, + Deprecated: "use 'associated_gateway_id' argument instead", + }, + }, } } -func migrateDxGatewayAssociationStateV0toV1(is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { +func resourceAwsDxGatewayAssociationStateUpgradeV0(rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { conn := meta.(*AWSClient).dxconn + log.Println("[INFO] Found Direct Connect gateway association state v0; migrating to v1") + // dx_gateway_association_id was introduced in v2.8.0. Handle the case where it's not yet present. - if _, ok := is.Attributes["dx_gateway_association_id"]; !ok { + if _, ok := rawState["dx_gateway_association_id"]; !ok { resp, err := conn.DescribeDirectConnectGatewayAssociations(&directconnect.DescribeDirectConnectGatewayAssociationsInput{ - DirectConnectGatewayId: aws.String(is.Attributes["dx_gateway_id"]), - VirtualGatewayId: aws.String(is.Attributes["vpn_gateway_id"]), + DirectConnectGatewayId: aws.String(rawState["dx_gateway_id"].(string)), + VirtualGatewayId: aws.String(rawState["vpn_gateway_id"].(string)), }) if err != nil { return nil, err @@ -36,8 +94,8 @@ func migrateDxGatewayAssociationStateV0toV1(is *terraform.InstanceState, meta in return nil, fmt.Errorf("Direct Connect gateway association not found, remove from state using 'terraform state rm'") } - is.Attributes["dx_gateway_association_id"] = aws.StringValue(resp.DirectConnectGatewayAssociations[0].AssociationId) + rawState["dx_gateway_association_id"] = aws.StringValue(resp.DirectConnectGatewayAssociations[0].AssociationId) } - return is, nil + return rawState, nil } From 86960009a30158fc8500b70056a7b53b1b758269 Mon Sep 17 00:00:00 2001 From: Renovate Bot Date: Mon, 28 Oct 2019 23:58:03 +0000 Subject: [PATCH 39/55] Update module aws/aws-sdk-go to v1.25.21 --- go.mod | 2 +- go.sum | 3 +- .../aws/aws-sdk-go/aws/client/client.go | 1 + .../aws-sdk-go/aws/client/default_retryer.go | 6 +- .../aws/client/metadata/client_info.go | 1 + .../github.com/aws/aws-sdk-go/aws/config.go | 14 + .../aws/aws-sdk-go/aws/csm/reporter.go | 1 - .../aws/aws-sdk-go/aws/endpoints/defaults.go | 181 +- .../aws/aws-sdk-go/aws/endpoints/endpoints.go | 49 +- .../aws/endpoints/sts_legacy_regions.go | 19 + .../aws/aws-sdk-go/aws/endpoints/v3model.go | 40 +- .../aws/aws-sdk-go/aws/session/env_config.go | 32 +- .../aws/aws-sdk-go/aws/session/session.go | 131 +- .../aws-sdk-go/aws/session/shared_config.go | 21 +- .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../aws/aws-sdk-go/service/acm/service.go | 5 +- .../aws/aws-sdk-go/service/acmpca/service.go | 5 +- .../aws/aws-sdk-go/service/amplify/service.go | 5 +- .../aws-sdk-go/service/apigateway/service.go | 5 +- .../service/apigatewayv2/service.go | 5 +- .../service/applicationautoscaling/service.go | 5 +- .../service/applicationinsights/service.go | 5 +- .../aws/aws-sdk-go/service/appmesh/api.go | 752 +++++-- .../aws/aws-sdk-go/service/appmesh/service.go | 5 +- .../aws-sdk-go/service/appstream/service.go | 5 +- .../aws/aws-sdk-go/service/appsync/service.go | 5 +- .../aws/aws-sdk-go/service/athena/service.go | 5 
+- .../aws-sdk-go/service/autoscaling/service.go | 5 +- .../service/autoscalingplans/service.go | 5 +- .../aws/aws-sdk-go/service/backup/service.go | 5 +- .../aws/aws-sdk-go/service/batch/api.go | 68 +- .../aws/aws-sdk-go/service/batch/service.go | 5 +- .../aws/aws-sdk-go/service/budgets/service.go | 5 +- .../aws/aws-sdk-go/service/cloud9/service.go | 5 +- .../service/cloudformation/service.go | 5 +- .../aws-sdk-go/service/cloudfront/service.go | 5 +- .../aws-sdk-go/service/cloudhsmv2/service.go | 5 +- .../aws-sdk-go/service/cloudsearch/service.go | 5 +- .../aws-sdk-go/service/cloudtrail/service.go | 5 +- .../aws/aws-sdk-go/service/cloudwatch/api.go | 64 +- .../aws-sdk-go/service/cloudwatch/service.go | 5 +- .../service/cloudwatchevents/service.go | 5 +- .../service/cloudwatchlogs/service.go | 5 +- .../aws-sdk-go/service/codebuild/service.go | 5 +- .../aws-sdk-go/service/codecommit/service.go | 5 +- .../aws-sdk-go/service/codedeploy/service.go | 5 +- .../service/codepipeline/service.go | 5 +- .../service/cognitoidentity/service.go | 5 +- .../cognitoidentityprovider/service.go | 5 +- .../service/configservice/service.go | 5 +- .../costandusagereportservice/service.go | 5 +- .../databasemigrationservice/service.go | 5 +- .../service/datapipeline/service.go | 5 +- .../aws-sdk-go/service/datasync/service.go | 5 +- .../aws/aws-sdk-go/service/dax/service.go | 5 +- .../aws-sdk-go/service/devicefarm/service.go | 5 +- .../service/directconnect/service.go | 5 +- .../service/directoryservice/service.go | 5 +- .../aws/aws-sdk-go/service/dlm/service.go | 5 +- .../aws/aws-sdk-go/service/docdb/service.go | 5 +- .../aws-sdk-go/service/dynamodb/service.go | 5 +- .../aws/aws-sdk-go/service/ec2/api.go | 40 +- .../aws/aws-sdk-go/service/ec2/service.go | 5 +- .../aws/aws-sdk-go/service/ecr/api.go | 1184 ++++++++++- .../aws/aws-sdk-go/service/ecr/errors.go | 7 + .../aws/aws-sdk-go/service/ecr/service.go | 5 +- .../aws/aws-sdk-go/service/ecs/service.go | 5 +- .../aws/aws-sdk-go/service/efs/service.go | 5 +- .../aws/aws-sdk-go/service/eks/service.go | 5 +- .../aws/aws-sdk-go/service/elasticache/api.go | 354 ++++ .../aws-sdk-go/service/elasticache/errors.go | 12 + .../aws-sdk-go/service/elasticache/service.go | 5 +- .../service/elasticbeanstalk/service.go | 5 +- .../service/elasticsearchservice/service.go | 5 +- .../service/elastictranscoder/service.go | 5 +- .../aws/aws-sdk-go/service/elb/service.go | 5 +- .../aws/aws-sdk-go/service/elbv2/service.go | 5 +- .../aws/aws-sdk-go/service/emr/service.go | 5 +- .../aws-sdk-go/service/firehose/service.go | 5 +- .../aws/aws-sdk-go/service/fms/service.go | 5 +- .../service/forecastservice/service.go | 5 +- .../aws/aws-sdk-go/service/fsx/service.go | 5 +- .../aws/aws-sdk-go/service/gamelift/api.go | 75 + .../aws-sdk-go/service/gamelift/service.go | 5 +- .../aws/aws-sdk-go/service/glacier/service.go | 5 +- .../service/globalaccelerator/service.go | 5 +- .../aws/aws-sdk-go/service/glue/service.go | 5 +- .../aws-sdk-go/service/guardduty/service.go | 5 +- .../aws/aws-sdk-go/service/iam/service.go | 5 +- .../aws-sdk-go/service/inspector/service.go | 5 +- .../aws/aws-sdk-go/service/iot/service.go | 5 +- .../service/iotanalytics/service.go | 5 +- .../aws/aws-sdk-go/service/iotevents/api.go | 94 +- .../aws-sdk-go/service/iotevents/service.go | 5 +- .../aws/aws-sdk-go/service/kafka/api.go | 264 ++- .../aws/aws-sdk-go/service/kafka/service.go | 5 +- .../aws/aws-sdk-go/service/kinesis/api.go | 4 +- .../aws/aws-sdk-go/service/kinesis/service.go | 5 +- 
.../service/kinesisanalytics/service.go | 5 +- .../service/kinesisanalyticsv2/service.go | 5 +- .../service/kinesisvideo/service.go | 5 +- .../aws/aws-sdk-go/service/kms/service.go | 5 +- .../service/lakeformation/service.go | 5 +- .../aws/aws-sdk-go/service/lambda/service.go | 5 +- .../lexmodelbuildingservice/service.go | 5 +- .../service/licensemanager/service.go | 5 +- .../aws-sdk-go/service/lightsail/service.go | 5 +- .../aws/aws-sdk-go/service/macie/service.go | 5 +- .../service/managedblockchain/service.go | 5 +- .../service/mediaconnect/service.go | 5 +- .../service/mediaconvert/service.go | 5 +- .../aws-sdk-go/service/medialive/service.go | 5 +- .../service/mediapackage/service.go | 5 +- .../aws-sdk-go/service/mediastore/service.go | 5 +- .../service/mediastoredata/service.go | 5 +- .../aws/aws-sdk-go/service/mq/service.go | 5 +- .../aws/aws-sdk-go/service/neptune/service.go | 5 +- .../aws-sdk-go/service/opsworks/service.go | 5 +- .../service/organizations/service.go | 5 +- .../aws/aws-sdk-go/service/personalize/api.go | 69 +- .../aws-sdk-go/service/personalize/service.go | 5 +- .../aws-sdk-go/service/pinpoint/service.go | 5 +- .../aws/aws-sdk-go/service/pricing/service.go | 5 +- .../aws/aws-sdk-go/service/qldb/service.go | 5 +- .../aws-sdk-go/service/quicksight/service.go | 5 +- .../aws/aws-sdk-go/service/ram/service.go | 5 +- .../aws/aws-sdk-go/service/rds/api.go | 1729 ++++++++++++++++- .../aws/aws-sdk-go/service/rds/doc.go | 6 +- .../aws/aws-sdk-go/service/rds/errors.go | 46 +- .../aws/aws-sdk-go/service/rds/service.go | 5 +- .../aws-sdk-go/service/redshift/service.go | 5 +- .../service/resourcegroups/service.go | 5 +- .../aws/aws-sdk-go/service/route53/service.go | 5 +- .../service/route53resolver/service.go | 5 +- .../aws/aws-sdk-go/service/s3/api.go | 51 +- .../aws/aws-sdk-go/service/s3/service.go | 5 +- .../aws-sdk-go/service/s3control/service.go | 5 +- .../aws/aws-sdk-go/service/sagemaker/api.go | 72 + .../aws-sdk-go/service/sagemaker/service.go | 5 +- .../service/secretsmanager/service.go | 5 +- .../aws-sdk-go/service/securityhub/service.go | 5 +- .../service.go | 5 +- .../service/servicecatalog/service.go | 5 +- .../service/servicediscovery/service.go | 5 +- .../service/servicequotas/service.go | 5 +- .../aws/aws-sdk-go/service/ses/service.go | 5 +- .../aws/aws-sdk-go/service/sfn/service.go | 5 +- .../aws/aws-sdk-go/service/shield/service.go | 5 +- .../aws-sdk-go/service/simpledb/service.go | 5 +- .../aws/aws-sdk-go/service/sns/service.go | 5 +- .../aws/aws-sdk-go/service/sqs/service.go | 5 +- .../aws/aws-sdk-go/service/ssm/service.go | 5 +- .../service/storagegateway/service.go | 5 +- .../aws/aws-sdk-go/service/sts/api.go | 6 +- .../aws/aws-sdk-go/service/sts/errors.go | 6 +- .../aws/aws-sdk-go/service/sts/service.go | 5 +- .../aws/aws-sdk-go/service/swf/service.go | 5 +- .../aws/aws-sdk-go/service/transfer/api.go | 466 ++++- .../aws/aws-sdk-go/service/transfer/errors.go | 8 + .../aws-sdk-go/service/transfer/service.go | 5 +- .../aws/aws-sdk-go/service/waf/service.go | 5 +- .../aws-sdk-go/service/wafregional/service.go | 5 +- .../aws-sdk-go/service/worklink/service.go | 5 +- .../aws/aws-sdk-go/service/workspaces/api.go | 15 +- .../aws-sdk-go/service/workspaces/service.go | 5 +- .../aws/aws-sdk-go/service/xray/service.go | 5 +- vendor/modules.txt | 2 +- 167 files changed, 5783 insertions(+), 754 deletions(-) create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/sts_legacy_regions.go diff --git a/go.mod b/go.mod index 1823675ebc8..9de5169821b 100644 --- 
a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.13 require ( github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0 // indirect - github.com/aws/aws-sdk-go v1.25.10 + github.com/aws/aws-sdk-go v1.25.21 github.com/beevik/etree v1.1.0 github.com/bflad/tfproviderlint v0.5.0 github.com/client9/misspell v0.3.4 diff --git a/go.sum b/go.sum index c046a9a6f88..111429272a7 100644 --- a/go.sum +++ b/go.sum @@ -36,8 +36,7 @@ github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgI github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= github.com/aws/aws-sdk-go v1.19.39/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.25.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.25.10 h1:3epJfNmP6xWkOpLOdhIIj07+9UAJwvbzq8bBzyPigI4= -github.com/aws/aws-sdk-go v1.25.10/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.25.21/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/beevik/etree v1.1.0 h1:T0xke/WvNtMoCqgzPhkX2r4rjY3GDZFi+FjpRZY2Jbs= github.com/beevik/etree v1.1.0/go.mod h1:r8Aw8JqVegEf0w2fDnATrX9VpkMcyFeM0FhwO62wh+A= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go index c022407f57b..03334d69207 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go @@ -12,6 +12,7 @@ import ( type Config struct { Config *aws.Config Handlers request.Handlers + PartitionID string Endpoint string SigningRegion string SigningName string diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go index 0fda42510f0..9f6af19dd45 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go @@ -16,11 +16,11 @@ import ( type DefaultRetryer struct { // Num max Retries is the number of max retries that will be performed. // By default, this is zero. - NumMaxRetries int + NumMaxRetries int // MinRetryDelay is the minimum retry delay after which retry will be performed. // If not set, the value is 0ns. - MinRetryDelay time.Duration + MinRetryDelay time.Duration // MinThrottleRetryDelay is the minimum retry delay when throttled. // If not set, the value is 0ns. @@ -28,7 +28,7 @@ type DefaultRetryer struct { // MaxRetryDelay is the maximum retry delay before which retry must be performed. // If not set, the value is 0ns. - MaxRetryDelay time.Duration + MaxRetryDelay time.Duration // MaxThrottleDelay is the maximum retry delay when throttled. // If not set, the value is 0ns. 
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go index 920e9fddf87..0c48f72e08e 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go @@ -5,6 +5,7 @@ type ClientInfo struct { ServiceName string ServiceID string APIVersion string + PartitionID string Endpoint string SigningName string SigningRegion string diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go index fd1e240f6eb..8a7699b9619 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -246,6 +246,9 @@ type Config struct { // Disabling this feature is useful when you want to use local endpoints // for testing that do not support the modeled host prefix pattern. DisableEndpointHostPrefix *bool + + // STSRegionalEndpoint will enable regional or legacy endpoint resolving + STSRegionalEndpoint endpoints.STSRegionalEndpoint } // NewConfig returns a new Config pointer that can be chained with builder @@ -420,6 +423,13 @@ func (c *Config) MergeIn(cfgs ...*Config) { } } +// WithSTSRegionalEndpoint will set whether or not to use regional endpoint flag +// when resolving the endpoint for a service +func (c *Config) WithSTSRegionalEndpoint(sre endpoints.STSRegionalEndpoint) *Config { + c.STSRegionalEndpoint = sre + return c +} + func mergeInConfig(dst *Config, other *Config) { if other == nil { return @@ -520,6 +530,10 @@ func mergeInConfig(dst *Config, other *Config) { if other.DisableEndpointHostPrefix != nil { dst.DisableEndpointHostPrefix = other.DisableEndpointHostPrefix } + + if other.STSRegionalEndpoint != endpoints.UnsetSTSEndpoint { + dst.STSRegionalEndpoint = other.STSRegionalEndpoint + } } // Copy will return a shallow copy of the Config object. 
If any additional diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go index c7008d8c3fc..9186587fc04 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go @@ -66,7 +66,6 @@ func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) { XAmzRequestID: aws.String(r.RequestID), - AttemptCount: aws.Int(r.RetryCount + 1), AttemptLatency: aws.Int(int(now.Sub(r.AttemptTime).Nanoseconds() / int64(time.Millisecond))), AccessKey: aws.String(creds.AccessKeyID), } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 1ceff2c13da..28ccafc8edb 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -209,6 +209,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -366,6 +367,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-1-fips": endpoint{ @@ -461,6 +463,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -578,14 +581,20 @@ var awsPartition = partition{ "backup": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -756,6 +765,7 @@ var awsPartition = partition{ "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, @@ -1231,6 +1241,18 @@ var awsPartition = partition{ Region: "ap-northeast-2", }, }, + "ap-south-1": endpoint{ + Hostname: "rds.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "rds.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, "ap-southeast-2": endpoint{ Hostname: "rds.ap-southeast-2.amazonaws.com", CredentialScope: credentialScope{ @@ -1668,6 +1690,28 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "forecast": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "forecastquery": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "fsx": service{ Endpoints: endpoints{ @@ -2269,8 +2313,11 @@ var awsPartition = partition{ "eu-central-1": 
endpoint{}, "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, + "us-east-2": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -2827,6 +2874,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-1-fips": endpoint{ @@ -3104,6 +3152,7 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-1-fips": endpoint{ @@ -3163,6 +3212,9 @@ var awsPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "ap-east-1": endpoint{ + Protocols: []string{"https"}, + }, "ap-northeast-1": endpoint{ Protocols: []string{"https"}, }, @@ -3556,44 +3608,29 @@ var awsPartition = partition{ }, "sts": service{ PartitionEndpoint: "aws-global", - Defaults: endpoint{ - Hostname: "sts.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, + Endpoints: endpoints{ - "ap-east-1": endpoint{ - Hostname: "sts.ap-east-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-east-1", - }, - }, + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{ - Hostname: "sts.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, - }, + "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, - "aws-global": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-north-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "eu-west-3": endpoint{}, - "me-south-1": endpoint{ - Hostname: "sts.me-south-1.amazonaws.com", + "aws-global": endpoint{ + Hostname: "sts.amazonaws.com", CredentialScope: credentialScope{ - Region: "me-south-1", + Region: "us-east-1", }, }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, "us-east-1-fips": endpoint{ Hostname: "sts-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ @@ -3681,6 +3718,38 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "transcribe": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "transcribestreaming": service{ + + Endpoints: endpoints{ + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "transfer": service{ Endpoints: endpoints{ @@ -4096,6 +4165,12 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "glue": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, + }, + }, "greengrass": service{ 
IsRegionalized: boxedTrue, Defaults: endpoint{ @@ -4333,6 +4408,25 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "transcribe": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{ + Hostname: "cn.transcribe.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + "cn-northwest-1": endpoint{ + Hostname: "cn.transcribe.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, }, } @@ -4919,6 +5013,13 @@ var awsusgovPartition = partition{ }, }, }, + "route53resolver": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, "runtime.sagemaker": service{ Endpoints: endpoints{ @@ -5123,6 +5224,14 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "transcribe": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "translate": service{ Defaults: endpoint{ Protocols: []string{"https"}, @@ -5189,6 +5298,12 @@ var awsisoPartition = partition{ }, }, }, + "api.sagemaker": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, "apigateway": service{ Endpoints: endpoints{ @@ -5410,6 +5525,12 @@ var awsisoPartition = partition{ }, }, }, + "runtime.sagemaker": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, "s3": service{ Defaults: endpoint{ SignatureVersions: []string{"s3v4"}, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go index 9c936be6cf9..fadff07d64c 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go @@ -3,6 +3,7 @@ package endpoints import ( "fmt" "regexp" + "strings" "github.com/aws/aws-sdk-go/aws/awserr" ) @@ -46,6 +47,43 @@ type Options struct { // // This option is ignored if StrictMatching is enabled. ResolveUnknownService bool + + // STS Regional Endpoint flag helps with resolving the STS endpoint + STSRegionalEndpoint STSRegionalEndpoint +} + +// STSRegionalEndpoint is an enum type alias for int +// It is used internally by the core sdk as STS Regional Endpoint flag value +type STSRegionalEndpoint int + +const ( + + // UnsetSTSEndpoint represents that STS Regional Endpoint flag is not specified. + UnsetSTSEndpoint STSRegionalEndpoint = iota + + // LegacySTSEndpoint represents when STS Regional Endpoint flag is specified + // to use legacy endpoints. + LegacySTSEndpoint + + // RegionalSTSEndpoint represents when STS Regional Endpoint flag is specified + // to use regional endpoints. + RegionalSTSEndpoint +) + +// GetSTSRegionalEndpoint function returns the STSRegionalEndpointFlag based +// on the input string provided in env config or shared config by the user. +// +// `legacy`, `regional` are the only case-insensitive valid strings for +// resolving the STS regional Endpoint flag. +func GetSTSRegionalEndpoint(s string) (STSRegionalEndpoint, error) { + switch { + case strings.EqualFold(s, "legacy"): + return LegacySTSEndpoint, nil + case strings.EqualFold(s, "regional"): + return RegionalSTSEndpoint, nil + default: + return UnsetSTSEndpoint, fmt.Errorf("unable to resolve the value of STSRegionalEndpoint for %v", s) + } } // Set combines all of the option functions together. 
@@ -79,6 +117,12 @@ func ResolveUnknownServiceOption(o *Options) { o.ResolveUnknownService = true } +// STSRegionalEndpointOption enables the STS endpoint resolver behavior to resolve +// STS endpoint to their regional endpoint, instead of the global endpoint. +func STSRegionalEndpointOption(o *Options) { + o.STSRegionalEndpoint = RegionalSTSEndpoint +} + // A Resolver provides the interface for functionality to resolve endpoints. // The build in Partition and DefaultResolver return value satisfy this interface. type Resolver interface { @@ -194,7 +238,7 @@ func (p Partition) ID() string { return p.id } // require the provided service and region to be known by the partition. // If the endpoint cannot be strictly resolved an error will be returned. This // mode is useful to ensure the endpoint resolved is valid. Without -// StrictMatching enabled the endpoint returned my look valid but may not work. +// StrictMatching enabled the endpoint returned may look valid but may not work. // StrictMatching requires the SDK to be updated if you want to take advantage // of new regions and services expansions. // @@ -350,6 +394,9 @@ type ResolvedEndpoint struct { // The endpoint URL URL string + // The endpoint partition + PartitionID string + // The region that should be used for signing requests. SigningRegion string diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/sts_legacy_regions.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/sts_legacy_regions.go new file mode 100644 index 00000000000..26139621972 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/sts_legacy_regions.go @@ -0,0 +1,19 @@ +package endpoints + +var stsLegacyGlobalRegions = map[string]struct{}{ + "ap-northeast-1": {}, + "ap-south-1": {}, + "ap-southeast-1": {}, + "ap-southeast-2": {}, + "ca-central-1": {}, + "eu-central-1": {}, + "eu-north-1": {}, + "eu-west-1": {}, + "eu-west-2": {}, + "eu-west-3": {}, + "sa-east-1": {}, + "us-east-1": {}, + "us-east-2": {}, + "us-west-1": {}, + "us-west-2": {}, +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go index 523ad79ac0a..7b09adff63e 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go @@ -75,24 +75,55 @@ func (p partition) canResolveEndpoint(service, region string, strictMatch bool) return p.RegionRegex.MatchString(region) } +func allowLegacyEmptyRegion(service string) bool { + legacy := map[string]struct{}{ + "budgets": {}, + "ce": {}, + "chime": {}, + "cloudfront": {}, + "ec2metadata": {}, + "iam": {}, + "importexport": {}, + "organizations": {}, + "route53": {}, + "sts": {}, + "support": {}, + "waf": {}, + } + + _, allowed := legacy[service] + return allowed +} + func (p partition) EndpointFor(service, region string, opts ...func(*Options)) (resolved ResolvedEndpoint, err error) { var opt Options opt.Set(opts...) s, hasService := p.Services[service] - if !(hasService || opt.ResolveUnknownService) { + if len(service) == 0 || !(hasService || opt.ResolveUnknownService) { // Only return error if the resolver will not fallback to creating // endpoint based on service endpoint ID passed in. 
return resolved, NewUnknownServiceError(p.ID, service, serviceList(p.Services)) } + if len(region) == 0 && allowLegacyEmptyRegion(service) && len(s.PartitionEndpoint) != 0 { + region = s.PartitionEndpoint + } + + if service == "sts" && opt.STSRegionalEndpoint != RegionalSTSEndpoint { + if _, ok := stsLegacyGlobalRegions[region]; ok { + region = "aws-global" + } + } + e, hasEndpoint := s.endpointForRegion(region) - if !hasEndpoint && opt.StrictMatching { + if len(region) == 0 || (!hasEndpoint && opt.StrictMatching) { return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(s.Endpoints)) } defs := []endpoint{p.Defaults, s.Defaults} - return e.resolve(service, region, p.DNSSuffix, defs, opt), nil + + return e.resolve(service, p.ID, region, p.DNSSuffix, defs, opt), nil } func serviceList(ss services) []string { @@ -201,7 +232,7 @@ func getByPriority(s []string, p []string, def string) string { return s[0] } -func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, opts Options) ResolvedEndpoint { +func (e endpoint) resolve(service, partitionID, region, dnsSuffix string, defs []endpoint, opts Options) ResolvedEndpoint { var merged endpoint for _, def := range defs { merged.mergeIn(def) @@ -237,6 +268,7 @@ func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, op return ResolvedEndpoint{ URL: u, + PartitionID: partitionID, SigningRegion: signingRegion, SigningName: signingName, SigningNameDerived: signingNameDerived, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go index 60a6f9ce2a4..530cc3a9c06 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go @@ -1,12 +1,14 @@ package session import ( + "fmt" "os" "strconv" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/defaults" + "github.com/aws/aws-sdk-go/aws/endpoints" ) // EnvProviderName provides a name of the provider when config is loaded from environment. @@ -125,6 +127,12 @@ type envConfig struct { // // AWS_ROLE_SESSION_NAME=session_name RoleSessionName string + + // Specifies the Regional Endpoint flag for the sdk to resolve the endpoint for a service + // + // AWS_STS_REGIONAL_ENDPOINTS =sts_regional_endpoint + // This can take value as `regional` or `legacy` + STSRegionalEndpoint endpoints.STSRegionalEndpoint } var ( @@ -179,6 +187,9 @@ var ( roleSessionNameEnvKey = []string{ "AWS_ROLE_SESSION_NAME", } + stsRegionalEndpointKey = []string{ + "AWS_STS_REGIONAL_ENDPOINTS", + } ) // loadEnvConfig retrieves the SDK's environment configuration. @@ -187,7 +198,7 @@ var ( // If the environment variable `AWS_SDK_LOAD_CONFIG` is set to a truthy value // the shared SDK config will be loaded in addition to the SDK's specific // configuration values. -func loadEnvConfig() envConfig { +func loadEnvConfig() (envConfig, error) { enableSharedConfig, _ := strconv.ParseBool(os.Getenv("AWS_SDK_LOAD_CONFIG")) return envConfigLoad(enableSharedConfig) } @@ -198,11 +209,11 @@ func loadEnvConfig() envConfig { // Loads the shared configuration in addition to the SDK's specific configuration. // This will load the same values as `loadEnvConfig` if the `AWS_SDK_LOAD_CONFIG` // environment variable is set. 
-func loadSharedEnvConfig() envConfig { +func loadSharedEnvConfig() (envConfig, error) { return envConfigLoad(true) } -func envConfigLoad(enableSharedConfig bool) envConfig { +func envConfigLoad(enableSharedConfig bool) (envConfig, error) { cfg := envConfig{} cfg.EnableSharedConfig = enableSharedConfig @@ -264,12 +275,23 @@ func envConfigLoad(enableSharedConfig bool) envConfig { cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE") - return cfg + // STS Regional Endpoint variable + for _, k := range stsRegionalEndpointKey { + if v := os.Getenv(k); len(v) != 0 { + STSRegionalEndpoint, err := endpoints.GetSTSRegionalEndpoint(v) + if err != nil { + return cfg, fmt.Errorf("failed to load, %v from env config, %v", k, err) + } + cfg.STSRegionalEndpoint = STSRegionalEndpoint + } + } + + return cfg, nil } func setFromEnvVal(dst *string, keys []string) { for _, k := range keys { - if v := os.Getenv(k); len(v) > 0 { + if v := os.Getenv(k); len(v) != 0 { *dst = v break } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go index 7b0a942e223..15fa647699f 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -73,7 +73,7 @@ type Session struct { // func is called instead of waiting to receive an error until a request is made. func New(cfgs ...*aws.Config) *Session { // load initial config from environment - envCfg := loadEnvConfig() + envCfg, envErr := loadEnvConfig() if envCfg.EnableSharedConfig { var cfg aws.Config @@ -93,17 +93,17 @@ func New(cfgs ...*aws.Config) *Session { // Session creation failed, need to report the error and prevent // any requests from succeeding. s = &Session{Config: defaults.Config()} - s.Config.MergeIn(cfgs...) - s.Config.Logger.Log("ERROR:", msg, "Error:", err) - s.Handlers.Validate.PushBack(func(r *request.Request) { - r.Error = err - }) + s.logDeprecatedNewSessionError(msg, err, cfgs) } return s } s := deprecatedNewSession(cfgs...) + if envErr != nil { + msg := "failed to load env config" + s.logDeprecatedNewSessionError(msg, envErr, cfgs) + } if csmCfg, err := loadCSMConfig(envCfg, []string{}); err != nil { if l := s.Config.Logger; l != nil { @@ -112,11 +112,8 @@ func New(cfgs ...*aws.Config) *Session { } else if csmCfg.Enabled { err := enableCSM(&s.Handlers, csmCfg, s.Config.Logger) if err != nil { - err = fmt.Errorf("failed to enable CSM, %v", err) - s.Config.Logger.Log("ERROR:", err.Error()) - s.Handlers.Validate.PushBack(func(r *request.Request) { - r.Error = err - }) + msg := "failed to enable CSM" + s.logDeprecatedNewSessionError(msg, err, cfgs) } } @@ -279,10 +276,17 @@ type Options struct { // })) func NewSessionWithOptions(opts Options) (*Session, error) { var envCfg envConfig + var err error if opts.SharedConfigState == SharedConfigEnable { - envCfg = loadSharedEnvConfig() + envCfg, err = loadSharedEnvConfig() + if err != nil { + return nil, fmt.Errorf("failed to load shared config, %v", err) + } } else { - envCfg = loadEnvConfig() + envCfg, err = loadEnvConfig() + if err != nil { + return nil, fmt.Errorf("failed to load environment config, %v", err) + } } if len(opts.Profile) != 0 { @@ -550,6 +554,9 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, } } + // Regional Endpoint flag for STS endpoint resolving + mergeSTSRegionalEndpointConfig(cfg, envCfg, sharedCfg) + // Configure credentials if not already set by the user when creating the // Session. 
if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil { @@ -563,6 +570,22 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, return nil } +// mergeSTSRegionalEndpointConfig function merges the STSRegionalEndpoint into cfg from +// envConfig and SharedConfig with envConfig being given precedence over SharedConfig +func mergeSTSRegionalEndpointConfig(cfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig) error { + + cfg.STSRegionalEndpoint = envCfg.STSRegionalEndpoint + + if cfg.STSRegionalEndpoint == endpoints.UnsetSTSEndpoint { + cfg.STSRegionalEndpoint = sharedCfg.STSRegionalEndpoint + } + + if cfg.STSRegionalEndpoint == endpoints.UnsetSTSEndpoint { + cfg.STSRegionalEndpoint = endpoints.LegacySTSEndpoint + } + return nil +} + func initHandlers(s *Session) { // Add the Validate parameter handler if it is not disabled. s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler) @@ -591,37 +614,15 @@ func (s *Session) Copy(cfgs ...*aws.Config) *Session { // ClientConfig satisfies the client.ConfigProvider interface and is used to // configure the service client instances. Passing the Session to the service // client's constructor (New) will use this method to configure the client. -func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config { - // Backwards compatibility, the error will be eaten if user calls ClientConfig - // directly. All SDK services will use ClientconfigWithError. - cfg, _ := s.clientConfigWithErr(serviceName, cfgs...) - - return cfg -} - -func (s *Session) clientConfigWithErr(serviceName string, cfgs ...*aws.Config) (client.Config, error) { +func (s *Session) ClientConfig(service string, cfgs ...*aws.Config) client.Config { s = s.Copy(cfgs...) - var resolved endpoints.ResolvedEndpoint - var err error - region := aws.StringValue(s.Config.Region) - - if endpoint := aws.StringValue(s.Config.Endpoint); len(endpoint) != 0 { - resolved.URL = endpoints.AddScheme(endpoint, aws.BoolValue(s.Config.DisableSSL)) - resolved.SigningRegion = region - } else { - resolved, err = s.Config.EndpointResolver.EndpointFor( - serviceName, region, - func(opt *endpoints.Options) { - opt.DisableSSL = aws.BoolValue(s.Config.DisableSSL) - opt.UseDualStack = aws.BoolValue(s.Config.UseDualStack) - - // Support the condition where the service is modeled but its - // endpoint metadata is not available. 
- opt.ResolveUnknownService = true - }, - ) + resolved, err := s.resolveEndpoint(service, region, s.Config) + if err != nil && s.Config.Logger != nil { + s.Config.Logger.Log(fmt.Sprintf( + "ERROR: unable to resolve endpoint for service %q, region %q, err: %v", + service, region, err)) } return client.Config{ @@ -631,7 +632,37 @@ func (s *Session) clientConfigWithErr(serviceName string, cfgs ...*aws.Config) ( SigningRegion: resolved.SigningRegion, SigningNameDerived: resolved.SigningNameDerived, SigningName: resolved.SigningName, - }, err + } +} + +func (s *Session) resolveEndpoint(service, region string, cfg *aws.Config) (endpoints.ResolvedEndpoint, error) { + + if ep := aws.StringValue(cfg.Endpoint); len(ep) != 0 { + return endpoints.ResolvedEndpoint{ + URL: endpoints.AddScheme(ep, aws.BoolValue(cfg.DisableSSL)), + SigningRegion: region, + }, nil + } + + resolved, err := cfg.EndpointResolver.EndpointFor(service, region, + func(opt *endpoints.Options) { + opt.DisableSSL = aws.BoolValue(cfg.DisableSSL) + opt.UseDualStack = aws.BoolValue(cfg.UseDualStack) + // Support for STSRegionalEndpoint where the STSRegionalEndpoint is + // provided in envConfig or sharedConfig with envConfig getting + // precedence. + opt.STSRegionalEndpoint = cfg.STSRegionalEndpoint + + // Support the condition where the service is modeled but its + // endpoint metadata is not available. + opt.ResolveUnknownService = true + }, + ) + if err != nil { + return endpoints.ResolvedEndpoint{}, err + } + + return resolved, nil } // ClientConfigNoResolveEndpoint is the same as ClientConfig with the exception @@ -641,12 +672,9 @@ func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Conf s = s.Copy(cfgs...) var resolved endpoints.ResolvedEndpoint - - region := aws.StringValue(s.Config.Region) - if ep := aws.StringValue(s.Config.Endpoint); len(ep) > 0 { resolved.URL = endpoints.AddScheme(ep, aws.BoolValue(s.Config.DisableSSL)) - resolved.SigningRegion = region + resolved.SigningRegion = aws.StringValue(s.Config.Region) } return client.Config{ @@ -658,3 +686,14 @@ func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Conf SigningName: resolved.SigningName, } } + +// logDeprecatedNewSessionError function enables error handling for session +func (s *Session) logDeprecatedNewSessionError(msg string, err error, cfgs []*aws.Config) { + // Session creation failed, need to report the error and prevent + // any requests from succeeding. + s.Config.MergeIn(cfgs...) 
+ s.Config.Logger.Log("ERROR:", msg, "Error:", err) + s.Handlers.Validate.PushBack(func(r *request.Request) { + r.Error = err + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go index d91ac93a544..8574668960b 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -5,6 +5,7 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/internal/ini" ) @@ -40,6 +41,9 @@ const ( // Web Identity Token File webIdentityTokenFileKey = `web_identity_token_file` // optional + // Additional config fields for regional or legacy endpoints + stsRegionalEndpointSharedKey = `sts_regional_endpoints` + // DefaultSharedConfigProfile is the default profile to be used when // loading configuration from the config files if another profile name // is not provided. @@ -82,12 +86,17 @@ type sharedConfig struct { // // endpoint_discovery_enabled = true EnableEndpointDiscovery *bool - // CSM Options CSMEnabled *bool CSMHost string CSMPort string CSMClientID string + + // Specifies the Regional Endpoint flag for the sdk to resolve the endpoint for a service + // + // sts_regional_endpoints = sts_regional_endpoint + // This can take value as `LegacySTSEndpoint` or `RegionalSTSEndpoint` + STSRegionalEndpoint endpoints.STSRegionalEndpoint } type sharedConfigFile struct { @@ -244,8 +253,16 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e updateString(&cfg.RoleSessionName, section, roleSessionNameKey) updateString(&cfg.SourceProfileName, section, sourceProfileKey) updateString(&cfg.CredentialSource, section, credentialSourceKey) - updateString(&cfg.Region, section, regionKey) + + if v := section.String(stsRegionalEndpointSharedKey); len(v) != 0 { + sre, err := endpoints.GetSTSRegionalEndpoint(v) + if err != nil { + return fmt.Errorf("failed to load %s from shared config, %s, %v", + stsRegionalEndpointKey, file.Filename, err) + } + cfg.STSRegionalEndpoint = sre + } } updateString(&cfg.CredentialProcess, section, credentialProcessKey) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 174bc4d013b..633784fe143 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.25.10" +const SDKVersion = "1.25.21" diff --git a/vendor/github.com/aws/aws-sdk-go/service/acm/service.go b/vendor/github.com/aws/aws-sdk-go/service/acm/service.go index 9817d0c0a5a..ac0bee1943a 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/acm/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/acm/service.go @@ -46,11 +46,11 @@ const ( // svc := acm.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *ACM { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ACM { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ACM { svc := &ACM{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-12-08", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/acmpca/service.go b/vendor/github.com/aws/aws-sdk-go/service/acmpca/service.go index 6c231c1d700..c041442ae4d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/acmpca/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/acmpca/service.go @@ -46,11 +46,11 @@ const ( // svc := acmpca.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *ACMPCA { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ACMPCA { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ACMPCA { svc := &ACMPCA{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-08-22", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/amplify/service.go b/vendor/github.com/aws/aws-sdk-go/service/amplify/service.go index f7073bd9961..7d4fa8ea876 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/amplify/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/amplify/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *Amplify { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "amplify" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Amplify { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Amplify { svc := &Amplify{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-07-25", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/apigateway/service.go b/vendor/github.com/aws/aws-sdk-go/service/apigateway/service.go index 8064d24fc02..cf398a11402 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/apigateway/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/apigateway/service.go @@ -46,11 +46,11 @@ const ( // svc := apigateway.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *APIGateway { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *APIGateway { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *APIGateway { svc := &APIGateway{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-07-09", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/apigatewayv2/service.go b/vendor/github.com/aws/aws-sdk-go/service/apigatewayv2/service.go index 02f80c40830..75199eb30aa 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/apigatewayv2/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/apigatewayv2/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *ApiGatewayV2 { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "apigateway" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ApiGatewayV2 { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ApiGatewayV2 { svc := &ApiGatewayV2{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-11-29", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/service.go b/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/service.go index 902d81d426e..0872f66dde3 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *ApplicationAutoScaling { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "application-autoscaling" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ApplicationAutoScaling { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ApplicationAutoScaling { svc := &ApplicationAutoScaling{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2016-02-06", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/applicationinsights/service.go b/vendor/github.com/aws/aws-sdk-go/service/applicationinsights/service.go index 72b8030bd46..b90f92dc11f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/applicationinsights/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/applicationinsights/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *ApplicationInsights { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "applicationinsights" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ApplicationInsights { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ApplicationInsights { svc := &ApplicationInsights{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-11-25", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/appmesh/api.go b/vendor/github.com/aws/aws-sdk-go/service/appmesh/api.go index 59daa7c77e8..b9c5476838e 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/appmesh/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/appmesh/api.go @@ -3240,11 +3240,11 @@ func (c *AppMesh) UpdateVirtualServiceWithContext(ctx aws.Context, input *Update return out, req.Send() } -// An object representing the access logging information for a virtual node. +// An object that represents the access logging information for a virtual node. type AccessLog struct { _ struct{} `type:"structure"` - // An object representing an access log file. + // An object that represents an access log file. File *FileAccessLog `locationName:"file" type:"structure"` } @@ -3279,8 +3279,8 @@ func (s *AccessLog) SetFile(v *FileAccessLog) *AccessLog { return s } -// An object representing the AWS Cloud Map attribute information for your virtual -// node. +// An object that represents the AWS Cloud Map attribute information for your +// virtual node. type AwsCloudMapInstanceAttribute struct { _ struct{} `type:"structure"` @@ -3335,8 +3335,8 @@ func (s *AwsCloudMapInstanceAttribute) SetValue(v string) *AwsCloudMapInstanceAt return s } -// An object representing the AWS Cloud Map service discovery information for -// your virtual node. +// An object that represents the AWS Cloud Map service discovery information +// for your virtual node. type AwsCloudMapServiceDiscovery struct { _ struct{} `type:"structure"` @@ -3409,12 +3409,12 @@ func (s *AwsCloudMapServiceDiscovery) SetServiceName(v string) *AwsCloudMapServi return s } -// An object representing the backends that a virtual node is expected to send -// outbound traffic to. +// An object that represents the backends that a virtual node is expected to +// send outbound traffic to. type Backend struct { _ struct{} `type:"structure"` - // An object representing a virtual service backend for a virtual node. + // An object that represents a virtual service backend for a virtual node. VirtualService *VirtualServiceBackend `locationName:"virtualService" type:"structure"` } @@ -3457,7 +3457,7 @@ type CreateMeshInput struct { // MeshName is a required field MeshName *string `locationName:"meshName" min:"1" type:"string" required:"true"` - // An object representing the specification of a service mesh. + // An object that represents the specification of a service mesh. Spec *MeshSpec `locationName:"spec" type:"structure"` Tags []*TagRef `locationName:"tags" type:"list"` @@ -3531,7 +3531,7 @@ func (s *CreateMeshInput) SetTags(v []*TagRef) *CreateMeshInput { type CreateMeshOutput struct { _ struct{} `type:"structure" payload:"Mesh"` - // An object representing a service mesh returned by a describe operation. + // An object that represents a service mesh returned by a describe operation. 
// // Mesh is a required field Mesh *MeshData `locationName:"mesh" type:"structure" required:"true"` @@ -3564,7 +3564,7 @@ type CreateRouteInput struct { // RouteName is a required field RouteName *string `locationName:"routeName" min:"1" type:"string" required:"true"` - // An object representing the specification of a route. + // An object that represents a route specification. Specify one route type. // // Spec is a required field Spec *RouteSpec `locationName:"spec" type:"structure" required:"true"` @@ -3670,7 +3670,7 @@ func (s *CreateRouteInput) SetVirtualRouterName(v string) *CreateRouteInput { type CreateRouteOutput struct { _ struct{} `type:"structure" payload:"Route"` - // An object representing a route returned by a describe operation. + // An object that represents a route returned by a describe operation. // // Route is a required field Route *RouteData `locationName:"route" type:"structure" required:"true"` @@ -3700,7 +3700,7 @@ type CreateVirtualNodeInput struct { // MeshName is a required field MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"` - // An object representing the specification of a virtual node. + // An object that represents the specification of a virtual node. // // Spec is a required field Spec *VirtualNodeSpec `locationName:"spec" type:"structure" required:"true"` @@ -3794,7 +3794,7 @@ func (s *CreateVirtualNodeInput) SetVirtualNodeName(v string) *CreateVirtualNode type CreateVirtualNodeOutput struct { _ struct{} `type:"structure" payload:"VirtualNode"` - // An object representing a virtual node returned by a describe operation. + // An object that represents a virtual node returned by a describe operation. // // VirtualNode is a required field VirtualNode *VirtualNodeData `locationName:"virtualNode" type:"structure" required:"true"` @@ -3824,7 +3824,7 @@ type CreateVirtualRouterInput struct { // MeshName is a required field MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"` - // An object representing the specification of a virtual router. + // An object that represents the specification of a virtual router. // // Spec is a required field Spec *VirtualRouterSpec `locationName:"spec" type:"structure" required:"true"` @@ -3918,7 +3918,7 @@ func (s *CreateVirtualRouterInput) SetVirtualRouterName(v string) *CreateVirtual type CreateVirtualRouterOutput struct { _ struct{} `type:"structure" payload:"VirtualRouter"` - // An object representing a virtual router returned by a describe operation. + // An object that represents a virtual router returned by a describe operation. // // VirtualRouter is a required field VirtualRouter *VirtualRouterData `locationName:"virtualRouter" type:"structure" required:"true"` @@ -3948,7 +3948,7 @@ type CreateVirtualServiceInput struct { // MeshName is a required field MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"` - // An object representing the specification of a virtual service. + // An object that represents the specification of a virtual service. // // Spec is a required field Spec *VirtualServiceSpec `locationName:"spec" type:"structure" required:"true"` @@ -4039,7 +4039,7 @@ func (s *CreateVirtualServiceInput) SetVirtualServiceName(v string) *CreateVirtu type CreateVirtualServiceOutput struct { _ struct{} `type:"structure" payload:"VirtualService"` - // An object representing a virtual service returned by a describe operation. 
+ // An object that represents a virtual service returned by a describe operation. // // VirtualService is a required field VirtualService *VirtualServiceData `locationName:"virtualService" type:"structure" required:"true"` @@ -4103,7 +4103,7 @@ func (s *DeleteMeshInput) SetMeshName(v string) *DeleteMeshInput { type DeleteMeshOutput struct { _ struct{} `type:"structure" payload:"Mesh"` - // An object representing a service mesh returned by a describe operation. + // An object that represents a service mesh returned by a describe operation. // // Mesh is a required field Mesh *MeshData `locationName:"mesh" type:"structure" required:"true"` @@ -4197,7 +4197,7 @@ func (s *DeleteRouteInput) SetVirtualRouterName(v string) *DeleteRouteInput { type DeleteRouteOutput struct { _ struct{} `type:"structure" payload:"Route"` - // An object representing a route returned by a describe operation. + // An object that represents a route returned by a describe operation. // // Route is a required field Route *RouteData `locationName:"route" type:"structure" required:"true"` @@ -4276,7 +4276,7 @@ func (s *DeleteVirtualNodeInput) SetVirtualNodeName(v string) *DeleteVirtualNode type DeleteVirtualNodeOutput struct { _ struct{} `type:"structure" payload:"VirtualNode"` - // An object representing a virtual node returned by a describe operation. + // An object that represents a virtual node returned by a describe operation. // // VirtualNode is a required field VirtualNode *VirtualNodeData `locationName:"virtualNode" type:"structure" required:"true"` @@ -4355,7 +4355,7 @@ func (s *DeleteVirtualRouterInput) SetVirtualRouterName(v string) *DeleteVirtual type DeleteVirtualRouterOutput struct { _ struct{} `type:"structure" payload:"VirtualRouter"` - // An object representing a virtual router returned by a describe operation. + // An object that represents a virtual router returned by a describe operation. // // VirtualRouter is a required field VirtualRouter *VirtualRouterData `locationName:"virtualRouter" type:"structure" required:"true"` @@ -4434,7 +4434,7 @@ func (s *DeleteVirtualServiceInput) SetVirtualServiceName(v string) *DeleteVirtu type DeleteVirtualServiceOutput struct { _ struct{} `type:"structure" payload:"VirtualService"` - // An object representing a virtual service returned by a describe operation. + // An object that represents a virtual service returned by a describe operation. // // VirtualService is a required field VirtualService *VirtualServiceData `locationName:"virtualService" type:"structure" required:"true"` @@ -4498,7 +4498,7 @@ func (s *DescribeMeshInput) SetMeshName(v string) *DescribeMeshInput { type DescribeMeshOutput struct { _ struct{} `type:"structure" payload:"Mesh"` - // An object representing a service mesh returned by a describe operation. + // An object that represents a service mesh returned by a describe operation. // // Mesh is a required field Mesh *MeshData `locationName:"mesh" type:"structure" required:"true"` @@ -4592,7 +4592,7 @@ func (s *DescribeRouteInput) SetVirtualRouterName(v string) *DescribeRouteInput type DescribeRouteOutput struct { _ struct{} `type:"structure" payload:"Route"` - // An object representing a route returned by a describe operation. + // An object that represents a route returned by a describe operation. 
// // Route is a required field Route *RouteData `locationName:"route" type:"structure" required:"true"` @@ -4671,7 +4671,7 @@ func (s *DescribeVirtualNodeInput) SetVirtualNodeName(v string) *DescribeVirtual type DescribeVirtualNodeOutput struct { _ struct{} `type:"structure" payload:"VirtualNode"` - // An object representing a virtual node returned by a describe operation. + // An object that represents a virtual node returned by a describe operation. // // VirtualNode is a required field VirtualNode *VirtualNodeData `locationName:"virtualNode" type:"structure" required:"true"` @@ -4750,7 +4750,7 @@ func (s *DescribeVirtualRouterInput) SetVirtualRouterName(v string) *DescribeVir type DescribeVirtualRouterOutput struct { _ struct{} `type:"structure" payload:"VirtualRouter"` - // An object representing a virtual router returned by a describe operation. + // An object that represents a virtual router returned by a describe operation. // // VirtualRouter is a required field VirtualRouter *VirtualRouterData `locationName:"virtualRouter" type:"structure" required:"true"` @@ -4829,7 +4829,7 @@ func (s *DescribeVirtualServiceInput) SetVirtualServiceName(v string) *DescribeV type DescribeVirtualServiceOutput struct { _ struct{} `type:"structure" payload:"VirtualService"` - // An object representing a virtual service returned by a describe operation. + // An object that represents a virtual service returned by a describe operation. // // VirtualService is a required field VirtualService *VirtualServiceData `locationName:"virtualService" type:"structure" required:"true"` @@ -4851,8 +4851,8 @@ func (s *DescribeVirtualServiceOutput) SetVirtualService(v *VirtualServiceData) return s } -// An object representing the DNS service discovery information for your virtual -// node. +// An object that represents the DNS service discovery information for your +// virtual node. type DnsServiceDiscovery struct { _ struct{} `type:"structure"` @@ -4889,7 +4889,7 @@ func (s *DnsServiceDiscovery) SetHostname(v string) *DnsServiceDiscovery { return s } -// An object representing the duration between retry attempts. +// An object that represents a duration of time. type Duration struct { _ struct{} `type:"structure"` @@ -4920,7 +4920,7 @@ func (s *Duration) SetValue(v int64) *Duration { return s } -// An object representing the egress filter rules for a service mesh. +// An object that represents the egress filter rules for a service mesh. type EgressFilter struct { _ struct{} `type:"structure"` @@ -4957,7 +4957,7 @@ func (s *EgressFilter) SetType(v string) *EgressFilter { return s } -// An object representing an access log file. +// An object that represents an access log file. type FileAccessLog struct { _ struct{} `type:"structure"` @@ -4997,8 +4997,436 @@ func (s *FileAccessLog) SetPath(v string) *FileAccessLog { return s } -// An object representing the method and value to match the header value sent -// with a request. Specify one match method. +// An object that represents a retry policy. Specify at least one value for +// at least one of the types of RetryEvents, a value for maxRetries, and a value +// for perRetryTimeout. +type GrpcRetryPolicy struct { + _ struct{} `type:"structure"` + + GrpcRetryEvents []*string `locationName:"grpcRetryEvents" min:"1" type:"list"` + + HttpRetryEvents []*string `locationName:"httpRetryEvents" min:"1" type:"list"` + + // MaxRetries is a required field + MaxRetries *int64 `locationName:"maxRetries" type:"long" required:"true"` + + // An object that represents a duration of time. 
+ // + // PerRetryTimeout is a required field + PerRetryTimeout *Duration `locationName:"perRetryTimeout" type:"structure" required:"true"` + + TcpRetryEvents []*string `locationName:"tcpRetryEvents" min:"1" type:"list"` +} + +// String returns the string representation +func (s GrpcRetryPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GrpcRetryPolicy) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GrpcRetryPolicy) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GrpcRetryPolicy"} + if s.GrpcRetryEvents != nil && len(s.GrpcRetryEvents) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GrpcRetryEvents", 1)) + } + if s.HttpRetryEvents != nil && len(s.HttpRetryEvents) < 1 { + invalidParams.Add(request.NewErrParamMinLen("HttpRetryEvents", 1)) + } + if s.MaxRetries == nil { + invalidParams.Add(request.NewErrParamRequired("MaxRetries")) + } + if s.PerRetryTimeout == nil { + invalidParams.Add(request.NewErrParamRequired("PerRetryTimeout")) + } + if s.TcpRetryEvents != nil && len(s.TcpRetryEvents) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TcpRetryEvents", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGrpcRetryEvents sets the GrpcRetryEvents field's value. +func (s *GrpcRetryPolicy) SetGrpcRetryEvents(v []*string) *GrpcRetryPolicy { + s.GrpcRetryEvents = v + return s +} + +// SetHttpRetryEvents sets the HttpRetryEvents field's value. +func (s *GrpcRetryPolicy) SetHttpRetryEvents(v []*string) *GrpcRetryPolicy { + s.HttpRetryEvents = v + return s +} + +// SetMaxRetries sets the MaxRetries field's value. +func (s *GrpcRetryPolicy) SetMaxRetries(v int64) *GrpcRetryPolicy { + s.MaxRetries = &v + return s +} + +// SetPerRetryTimeout sets the PerRetryTimeout field's value. +func (s *GrpcRetryPolicy) SetPerRetryTimeout(v *Duration) *GrpcRetryPolicy { + s.PerRetryTimeout = v + return s +} + +// SetTcpRetryEvents sets the TcpRetryEvents field's value. +func (s *GrpcRetryPolicy) SetTcpRetryEvents(v []*string) *GrpcRetryPolicy { + s.TcpRetryEvents = v + return s +} + +// An object that represents a GRPC route type. +type GrpcRoute struct { + _ struct{} `type:"structure"` + + // An object that represents the action to take if a match is determined. + // + // Action is a required field + Action *GrpcRouteAction `locationName:"action" type:"structure" required:"true"` + + // An object that represents the criteria for determining a request match. + // + // Match is a required field + Match *GrpcRouteMatch `locationName:"match" type:"structure" required:"true"` + + // An object that represents a retry policy. Specify at least one value for + // at least one of the types of RetryEvents, a value for maxRetries, and a value + // for perRetryTimeout. + RetryPolicy *GrpcRetryPolicy `locationName:"retryPolicy" type:"structure"` +} + +// String returns the string representation +func (s GrpcRoute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GrpcRoute) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GrpcRoute) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GrpcRoute"} + if s.Action == nil { + invalidParams.Add(request.NewErrParamRequired("Action")) + } + if s.Match == nil { + invalidParams.Add(request.NewErrParamRequired("Match")) + } + if s.Action != nil { + if err := s.Action.Validate(); err != nil { + invalidParams.AddNested("Action", err.(request.ErrInvalidParams)) + } + } + if s.Match != nil { + if err := s.Match.Validate(); err != nil { + invalidParams.AddNested("Match", err.(request.ErrInvalidParams)) + } + } + if s.RetryPolicy != nil { + if err := s.RetryPolicy.Validate(); err != nil { + invalidParams.AddNested("RetryPolicy", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAction sets the Action field's value. +func (s *GrpcRoute) SetAction(v *GrpcRouteAction) *GrpcRoute { + s.Action = v + return s +} + +// SetMatch sets the Match field's value. +func (s *GrpcRoute) SetMatch(v *GrpcRouteMatch) *GrpcRoute { + s.Match = v + return s +} + +// SetRetryPolicy sets the RetryPolicy field's value. +func (s *GrpcRoute) SetRetryPolicy(v *GrpcRetryPolicy) *GrpcRoute { + s.RetryPolicy = v + return s +} + +// An object that represents the action to take if a match is determined. +type GrpcRouteAction struct { + _ struct{} `type:"structure"` + + // WeightedTargets is a required field + WeightedTargets []*WeightedTarget `locationName:"weightedTargets" min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s GrpcRouteAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GrpcRouteAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GrpcRouteAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GrpcRouteAction"} + if s.WeightedTargets == nil { + invalidParams.Add(request.NewErrParamRequired("WeightedTargets")) + } + if s.WeightedTargets != nil && len(s.WeightedTargets) < 1 { + invalidParams.Add(request.NewErrParamMinLen("WeightedTargets", 1)) + } + if s.WeightedTargets != nil { + for i, v := range s.WeightedTargets { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "WeightedTargets", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetWeightedTargets sets the WeightedTargets field's value. +func (s *GrpcRouteAction) SetWeightedTargets(v []*WeightedTarget) *GrpcRouteAction { + s.WeightedTargets = v + return s +} + +// An object that represents the criteria for determining a request match. +type GrpcRouteMatch struct { + _ struct{} `type:"structure"` + + Metadata []*GrpcRouteMetadata `locationName:"metadata" min:"1" type:"list"` + + MethodName *string `locationName:"methodName" min:"1" type:"string"` + + ServiceName *string `locationName:"serviceName" type:"string"` +} + +// String returns the string representation +func (s GrpcRouteMatch) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GrpcRouteMatch) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GrpcRouteMatch) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GrpcRouteMatch"} + if s.Metadata != nil && len(s.Metadata) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Metadata", 1)) + } + if s.MethodName != nil && len(*s.MethodName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MethodName", 1)) + } + if s.Metadata != nil { + for i, v := range s.Metadata { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Metadata", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMetadata sets the Metadata field's value. +func (s *GrpcRouteMatch) SetMetadata(v []*GrpcRouteMetadata) *GrpcRouteMatch { + s.Metadata = v + return s +} + +// SetMethodName sets the MethodName field's value. +func (s *GrpcRouteMatch) SetMethodName(v string) *GrpcRouteMatch { + s.MethodName = &v + return s +} + +// SetServiceName sets the ServiceName field's value. +func (s *GrpcRouteMatch) SetServiceName(v string) *GrpcRouteMatch { + s.ServiceName = &v + return s +} + +// An object that represents the match metadata for the route. +type GrpcRouteMetadata struct { + _ struct{} `type:"structure"` + + Invert *bool `locationName:"invert" type:"boolean"` + + // An object that represents the match method. Specify one of the match values. + Match *GrpcRouteMetadataMatchMethod `locationName:"match" type:"structure"` + + // Name is a required field + Name *string `locationName:"name" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GrpcRouteMetadata) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GrpcRouteMetadata) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GrpcRouteMetadata) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GrpcRouteMetadata"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Match != nil { + if err := s.Match.Validate(); err != nil { + invalidParams.AddNested("Match", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInvert sets the Invert field's value. +func (s *GrpcRouteMetadata) SetInvert(v bool) *GrpcRouteMetadata { + s.Invert = &v + return s +} + +// SetMatch sets the Match field's value. +func (s *GrpcRouteMetadata) SetMatch(v *GrpcRouteMetadataMatchMethod) *GrpcRouteMetadata { + s.Match = v + return s +} + +// SetName sets the Name field's value. +func (s *GrpcRouteMetadata) SetName(v string) *GrpcRouteMetadata { + s.Name = &v + return s +} + +// An object that represents the match method. Specify one of the match values. +type GrpcRouteMetadataMatchMethod struct { + _ struct{} `type:"structure"` + + Exact *string `locationName:"exact" min:"1" type:"string"` + + Prefix *string `locationName:"prefix" min:"1" type:"string"` + + // An object that represents the range of values to match on. The first character + // of the range is included in the range, though the last character is not. + // For example, if the range specified were 1-100, only values 1-99 would be + // matched. 
+ Range *MatchRange `locationName:"range" type:"structure"` + + Regex *string `locationName:"regex" min:"1" type:"string"` + + Suffix *string `locationName:"suffix" min:"1" type:"string"` +} + +// String returns the string representation +func (s GrpcRouteMetadataMatchMethod) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GrpcRouteMetadataMatchMethod) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GrpcRouteMetadataMatchMethod) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GrpcRouteMetadataMatchMethod"} + if s.Exact != nil && len(*s.Exact) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Exact", 1)) + } + if s.Prefix != nil && len(*s.Prefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Prefix", 1)) + } + if s.Regex != nil && len(*s.Regex) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Regex", 1)) + } + if s.Suffix != nil && len(*s.Suffix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Suffix", 1)) + } + if s.Range != nil { + if err := s.Range.Validate(); err != nil { + invalidParams.AddNested("Range", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetExact sets the Exact field's value. +func (s *GrpcRouteMetadataMatchMethod) SetExact(v string) *GrpcRouteMetadataMatchMethod { + s.Exact = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *GrpcRouteMetadataMatchMethod) SetPrefix(v string) *GrpcRouteMetadataMatchMethod { + s.Prefix = &v + return s +} + +// SetRange sets the Range field's value. +func (s *GrpcRouteMetadataMatchMethod) SetRange(v *MatchRange) *GrpcRouteMetadataMatchMethod { + s.Range = v + return s +} + +// SetRegex sets the Regex field's value. +func (s *GrpcRouteMetadataMatchMethod) SetRegex(v string) *GrpcRouteMetadataMatchMethod { + s.Regex = &v + return s +} + +// SetSuffix sets the Suffix field's value. +func (s *GrpcRouteMetadataMatchMethod) SetSuffix(v string) *GrpcRouteMetadataMatchMethod { + s.Suffix = &v + return s +} + +// An object that represents the method and value to match with the header value +// sent in a request. Specify one match method. type HeaderMatchMethod struct { _ struct{} `type:"structure"` @@ -5006,9 +5434,10 @@ type HeaderMatchMethod struct { Prefix *string `locationName:"prefix" min:"1" type:"string"` - // The range of values to match on. The first character of the range is included - // in the range, though the last character is not. For example, if the range - // specified were 1-100, only values 1-99 would be matched. + // An object that represents the range of values to match on. The first character + // of the range is included in the range, though the last character is not. + // For example, if the range specified were 1-100, only values 1-99 would be + // matched. Range *MatchRange `locationName:"range" type:"structure"` Regex *string `locationName:"regex" min:"1" type:"string"` @@ -5083,7 +5512,7 @@ func (s *HeaderMatchMethod) SetSuffix(v string) *HeaderMatchMethod { return s } -// An object representing the health check policy for a virtual node's listener. +// An object that represents the health check policy for a virtual node's listener. 
type HealthCheckPolicy struct { _ struct{} `type:"structure"` @@ -5199,7 +5628,9 @@ func (s *HealthCheckPolicy) SetUnhealthyThreshold(v int64) *HealthCheckPolicy { return s } -// An object that represents a retry policy. +// An object that represents a retry policy. Specify at least one value for +// at least one of the types of RetryEvents, a value for maxRetries, and a value +// for perRetryTimeout. type HttpRetryPolicy struct { _ struct{} `type:"structure"` @@ -5208,7 +5639,7 @@ type HttpRetryPolicy struct { // MaxRetries is a required field MaxRetries *int64 `locationName:"maxRetries" type:"long" required:"true"` - // An object representing the duration between retry attempts. + // An object that represents a duration of time. // // PerRetryTimeout is a required field PerRetryTimeout *Duration `locationName:"perRetryTimeout" type:"structure" required:"true"` @@ -5272,23 +5703,24 @@ func (s *HttpRetryPolicy) SetTcpRetryEvents(v []*string) *HttpRetryPolicy { return s } -// An object representing the HTTP routing specification for a route. +// An object that represents an HTTP or HTTP2 route type. type HttpRoute struct { _ struct{} `type:"structure"` - // An object representing the traffic distribution requirements for matched - // HTTP requests. + // An object that represents the action to take if a match is determined. // // Action is a required field Action *HttpRouteAction `locationName:"action" type:"structure" required:"true"` - // An object representing the requirements for a route to match HTTP requests + // An object that represents the requirements for a route to match HTTP requests // for a virtual router. // // Match is a required field Match *HttpRouteMatch `locationName:"match" type:"structure" required:"true"` - // An object that represents a retry policy. + // An object that represents a retry policy. Specify at least one value for + // at least one of the types of RetryEvents, a value for maxRetries, and a value + // for perRetryTimeout. RetryPolicy *HttpRetryPolicy `locationName:"retryPolicy" type:"structure"` } @@ -5351,8 +5783,7 @@ func (s *HttpRoute) SetRetryPolicy(v *HttpRetryPolicy) *HttpRoute { return s } -// An object representing the traffic distribution requirements for matched -// HTTP requests. +// An object that represents the action to take if a match is determined. type HttpRouteAction struct { _ struct{} `type:"structure"` @@ -5402,14 +5833,14 @@ func (s *HttpRouteAction) SetWeightedTargets(v []*WeightedTarget) *HttpRouteActi return s } -// An object representing the HTTP header in the request. +// An object that represents the HTTP header in the request. type HttpRouteHeader struct { _ struct{} `type:"structure"` Invert *bool `locationName:"invert" type:"boolean"` - // An object representing the method and value to match the header value sent - // with a request. Specify one match method. + // An object that represents the method and value to match with the header value + // sent in a request. Specify one match method. Match *HeaderMatchMethod `locationName:"match" type:"structure"` // Name is a required field @@ -5465,7 +5896,7 @@ func (s *HttpRouteHeader) SetName(v string) *HttpRouteHeader { return s } -// An object representing the requirements for a route to match HTTP requests +// An object that represents the requirements for a route to match HTTP requests // for a virtual router. 
type HttpRouteMatch struct { _ struct{} `type:"structure"` @@ -6071,14 +6502,14 @@ func (s *ListVirtualServicesOutput) SetVirtualServices(v []*VirtualServiceRef) * return s } -// An object representing a listener for a virtual node. +// An object that represents a listener for a virtual node. type Listener struct { _ struct{} `type:"structure"` - // An object representing the health check policy for a virtual node's listener. + // An object that represents the health check policy for a virtual node's listener. HealthCheck *HealthCheckPolicy `locationName:"healthCheck" type:"structure"` - // An object representing a virtual node or virtual router listener port mapping. + // An object that represents a port mapping. // // PortMapping is a required field PortMapping *PortMapping `locationName:"portMapping" type:"structure" required:"true"` @@ -6129,11 +6560,11 @@ func (s *Listener) SetPortMapping(v *PortMapping) *Listener { return s } -// An object representing the logging information for a virtual node. +// An object that represents the logging information for a virtual node. type Logging struct { _ struct{} `type:"structure"` - // An object representing the access logging information for a virtual node. + // An object that represents the access logging information for a virtual node. AccessLog *AccessLog `locationName:"accessLog" type:"structure"` } @@ -6168,9 +6599,10 @@ func (s *Logging) SetAccessLog(v *AccessLog) *Logging { return s } -// The range of values to match on. The first character of the range is included -// in the range, though the last character is not. For example, if the range -// specified were 1-100, only values 1-99 would be matched. +// An object that represents the range of values to match on. The first character +// of the range is included in the range, though the last character is not. +// For example, if the range specified were 1-100, only values 1-99 would be +// matched. type MatchRange struct { _ struct{} `type:"structure"` @@ -6219,24 +6651,24 @@ func (s *MatchRange) SetStart(v int64) *MatchRange { return s } -// An object representing a service mesh returned by a describe operation. +// An object that represents a service mesh returned by a describe operation. type MeshData struct { _ struct{} `type:"structure"` // MeshName is a required field MeshName *string `locationName:"meshName" min:"1" type:"string" required:"true"` - // An object representing metadata for a resource. + // An object that represents metadata for a resource. // // Metadata is a required field Metadata *ResourceMetadata `locationName:"metadata" type:"structure" required:"true"` - // An object representing the specification of a service mesh. + // An object that represents the specification of a service mesh. // // Spec is a required field Spec *MeshSpec `locationName:"spec" type:"structure" required:"true"` - // An object representing the status of a service mesh. + // An object that represents the status of a service mesh. // // Status is a required field Status *MeshStatus `locationName:"status" type:"structure" required:"true"` @@ -6276,7 +6708,7 @@ func (s *MeshData) SetStatus(v *MeshStatus) *MeshData { return s } -// An object representing a service mesh returned by a list operation. +// An object that represents a service mesh returned by a list operation. type MeshRef struct { _ struct{} `type:"structure"` @@ -6309,11 +6741,11 @@ func (s *MeshRef) SetMeshName(v string) *MeshRef { return s } -// An object representing the specification of a service mesh. 
+// An object that represents the specification of a service mesh. type MeshSpec struct { _ struct{} `type:"structure"` - // An object representing the egress filter rules for a service mesh. + // An object that represents the egress filter rules for a service mesh. EgressFilter *EgressFilter `locationName:"egressFilter" type:"structure"` } @@ -6348,7 +6780,7 @@ func (s *MeshSpec) SetEgressFilter(v *EgressFilter) *MeshSpec { return s } -// An object representing the status of a service mesh. +// An object that represents the status of a service mesh. type MeshStatus struct { _ struct{} `type:"structure"` @@ -6371,7 +6803,7 @@ func (s *MeshStatus) SetStatus(v string) *MeshStatus { return s } -// An object representing a virtual node or virtual router listener port mapping. +// An object that represents a port mapping. type PortMapping struct { _ struct{} `type:"structure"` @@ -6423,7 +6855,7 @@ func (s *PortMapping) SetProtocol(v string) *PortMapping { return s } -// An object representing metadata for a resource. +// An object that represents metadata for a resource. type ResourceMetadata struct { _ struct{} `type:"structure"` @@ -6483,14 +6915,14 @@ func (s *ResourceMetadata) SetVersion(v int64) *ResourceMetadata { return s } -// An object representing a route returned by a describe operation. +// An object that represents a route returned by a describe operation. type RouteData struct { _ struct{} `type:"structure"` // MeshName is a required field MeshName *string `locationName:"meshName" min:"1" type:"string" required:"true"` - // An object representing metadata for a resource. + // An object that represents metadata for a resource. // // Metadata is a required field Metadata *ResourceMetadata `locationName:"metadata" type:"structure" required:"true"` @@ -6498,12 +6930,12 @@ type RouteData struct { // RouteName is a required field RouteName *string `locationName:"routeName" min:"1" type:"string" required:"true"` - // An object representing the specification of a route. + // An object that represents a route specification. Specify one route type. // // Spec is a required field Spec *RouteSpec `locationName:"spec" type:"structure" required:"true"` - // An object representing the current status of a route. + // An object that represents the current status of a route. // // Status is a required field Status *RouteStatus `locationName:"status" type:"structure" required:"true"` @@ -6558,7 +6990,7 @@ func (s *RouteData) SetVirtualRouterName(v string) *RouteData { return s } -// An object representing a route returned by a list operation. +// An object that represents a route returned by a list operation. type RouteRef struct { _ struct{} `type:"structure"` @@ -6609,16 +7041,22 @@ func (s *RouteRef) SetVirtualRouterName(v string) *RouteRef { return s } -// An object representing the specification of a route. +// An object that represents a route specification. Specify one route type. type RouteSpec struct { _ struct{} `type:"structure"` - // An object representing the HTTP routing specification for a route. + // An object that represents a GRPC route type. + GrpcRoute *GrpcRoute `locationName:"grpcRoute" type:"structure"` + + // An object that represents an HTTP or HTTP2 route type. + Http2Route *HttpRoute `locationName:"http2Route" type:"structure"` + + // An object that represents an HTTP or HTTP2 route type. 
HttpRoute *HttpRoute `locationName:"httpRoute" type:"structure"` Priority *int64 `locationName:"priority" type:"integer"` - // An object representing the TCP routing specification for a route. + // An object that represents a TCP route type. TcpRoute *TcpRoute `locationName:"tcpRoute" type:"structure"` } @@ -6635,6 +7073,16 @@ func (s RouteSpec) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *RouteSpec) Validate() error { invalidParams := request.ErrInvalidParams{Context: "RouteSpec"} + if s.GrpcRoute != nil { + if err := s.GrpcRoute.Validate(); err != nil { + invalidParams.AddNested("GrpcRoute", err.(request.ErrInvalidParams)) + } + } + if s.Http2Route != nil { + if err := s.Http2Route.Validate(); err != nil { + invalidParams.AddNested("Http2Route", err.(request.ErrInvalidParams)) + } + } if s.HttpRoute != nil { if err := s.HttpRoute.Validate(); err != nil { invalidParams.AddNested("HttpRoute", err.(request.ErrInvalidParams)) @@ -6652,6 +7100,18 @@ func (s *RouteSpec) Validate() error { return nil } +// SetGrpcRoute sets the GrpcRoute field's value. +func (s *RouteSpec) SetGrpcRoute(v *GrpcRoute) *RouteSpec { + s.GrpcRoute = v + return s +} + +// SetHttp2Route sets the Http2Route field's value. +func (s *RouteSpec) SetHttp2Route(v *HttpRoute) *RouteSpec { + s.Http2Route = v + return s +} + // SetHttpRoute sets the HttpRoute field's value. func (s *RouteSpec) SetHttpRoute(v *HttpRoute) *RouteSpec { s.HttpRoute = v @@ -6670,7 +7130,7 @@ func (s *RouteSpec) SetTcpRoute(v *TcpRoute) *RouteSpec { return s } -// An object representing the current status of a route. +// An object that represents the current status of a route. type RouteStatus struct { _ struct{} `type:"structure"` @@ -6694,16 +7154,17 @@ func (s *RouteStatus) SetStatus(v string) *RouteStatus { return s } -// An object representing the service discovery information for a virtual node. +// An object that represents the service discovery information for a virtual +// node. type ServiceDiscovery struct { _ struct{} `type:"structure"` - // An object representing the AWS Cloud Map service discovery information for - // your virtual node. + // An object that represents the AWS Cloud Map service discovery information + // for your virtual node. AwsCloudMap *AwsCloudMapServiceDiscovery `locationName:"awsCloudMap" type:"structure"` - // An object representing the DNS service discovery information for your virtual - // node. + // An object that represents the DNS service discovery information for your + // virtual node. Dns *DnsServiceDiscovery `locationName:"dns" type:"structure"` } @@ -6872,12 +7333,11 @@ func (s TagResourceOutput) GoString() string { return s.String() } -// An object representing the TCP routing specification for a route. +// An object that represents a TCP route type. type TcpRoute struct { _ struct{} `type:"structure"` - // An object representing the traffic distribution requirements for matched - // TCP requests. + // An object that represents the action to take if a match is determined. // // Action is a required field Action *TcpRouteAction `locationName:"action" type:"structure" required:"true"` @@ -6917,8 +7377,7 @@ func (s *TcpRoute) SetAction(v *TcpRouteAction) *TcpRoute { return s } -// An object representing the traffic distribution requirements for matched -// TCP requests. +// An object that represents the action to take if a match is determined. 
type TcpRouteAction struct { _ struct{} `type:"structure"` @@ -7038,7 +7497,7 @@ type UpdateMeshInput struct { // MeshName is a required field MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"` - // An object representing the specification of a service mesh. + // An object that represents the specification of a service mesh. Spec *MeshSpec `locationName:"spec" type:"structure"` } @@ -7094,7 +7553,7 @@ func (s *UpdateMeshInput) SetSpec(v *MeshSpec) *UpdateMeshInput { type UpdateMeshOutput struct { _ struct{} `type:"structure" payload:"Mesh"` - // An object representing a service mesh returned by a describe operation. + // An object that represents a service mesh returned by a describe operation. // // Mesh is a required field Mesh *MeshData `locationName:"mesh" type:"structure" required:"true"` @@ -7127,7 +7586,7 @@ type UpdateRouteInput struct { // RouteName is a required field RouteName *string `location:"uri" locationName:"routeName" min:"1" type:"string" required:"true"` - // An object representing the specification of a route. + // An object that represents a route specification. Specify one route type. // // Spec is a required field Spec *RouteSpec `locationName:"spec" type:"structure" required:"true"` @@ -7215,7 +7674,7 @@ func (s *UpdateRouteInput) SetVirtualRouterName(v string) *UpdateRouteInput { type UpdateRouteOutput struct { _ struct{} `type:"structure" payload:"Route"` - // An object representing a route returned by a describe operation. + // An object that represents a route returned by a describe operation. // // Route is a required field Route *RouteData `locationName:"route" type:"structure" required:"true"` @@ -7245,7 +7704,7 @@ type UpdateVirtualNodeInput struct { // MeshName is a required field MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"` - // An object representing the specification of a virtual node. + // An object that represents the specification of a virtual node. // // Spec is a required field Spec *VirtualNodeSpec `locationName:"spec" type:"structure" required:"true"` @@ -7321,7 +7780,7 @@ func (s *UpdateVirtualNodeInput) SetVirtualNodeName(v string) *UpdateVirtualNode type UpdateVirtualNodeOutput struct { _ struct{} `type:"structure" payload:"VirtualNode"` - // An object representing a virtual node returned by a describe operation. + // An object that represents a virtual node returned by a describe operation. // // VirtualNode is a required field VirtualNode *VirtualNodeData `locationName:"virtualNode" type:"structure" required:"true"` @@ -7351,7 +7810,7 @@ type UpdateVirtualRouterInput struct { // MeshName is a required field MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"` - // An object representing the specification of a virtual router. + // An object that represents the specification of a virtual router. // // Spec is a required field Spec *VirtualRouterSpec `locationName:"spec" type:"structure" required:"true"` @@ -7427,7 +7886,7 @@ func (s *UpdateVirtualRouterInput) SetVirtualRouterName(v string) *UpdateVirtual type UpdateVirtualRouterOutput struct { _ struct{} `type:"structure" payload:"VirtualRouter"` - // An object representing a virtual router returned by a describe operation. + // An object that represents a virtual router returned by a describe operation. 
// // VirtualRouter is a required field VirtualRouter *VirtualRouterData `locationName:"virtualRouter" type:"structure" required:"true"` @@ -7457,7 +7916,7 @@ type UpdateVirtualServiceInput struct { // MeshName is a required field MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"` - // An object representing the specification of a virtual service. + // An object that represents the specification of a virtual service. // // Spec is a required field Spec *VirtualServiceSpec `locationName:"spec" type:"structure" required:"true"` @@ -7533,7 +7992,7 @@ func (s *UpdateVirtualServiceInput) SetVirtualServiceName(v string) *UpdateVirtu type UpdateVirtualServiceOutput struct { _ struct{} `type:"structure" payload:"VirtualService"` - // An object representing a virtual service returned by a describe operation. + // An object that represents a virtual service returned by a describe operation. // // VirtualService is a required field VirtualService *VirtualServiceData `locationName:"virtualService" type:"structure" required:"true"` @@ -7555,24 +8014,24 @@ func (s *UpdateVirtualServiceOutput) SetVirtualService(v *VirtualServiceData) *U return s } -// An object representing a virtual node returned by a describe operation. +// An object that represents a virtual node returned by a describe operation. type VirtualNodeData struct { _ struct{} `type:"structure"` // MeshName is a required field MeshName *string `locationName:"meshName" min:"1" type:"string" required:"true"` - // An object representing metadata for a resource. + // An object that represents metadata for a resource. // // Metadata is a required field Metadata *ResourceMetadata `locationName:"metadata" type:"structure" required:"true"` - // An object representing the specification of a virtual node. + // An object that represents the specification of a virtual node. // // Spec is a required field Spec *VirtualNodeSpec `locationName:"spec" type:"structure" required:"true"` - // An object representing the current status of the virtual node. + // An object that represents the current status of the virtual node. // // Status is a required field Status *VirtualNodeStatus `locationName:"status" type:"structure" required:"true"` @@ -7621,7 +8080,7 @@ func (s *VirtualNodeData) SetVirtualNodeName(v string) *VirtualNodeData { return s } -// An object representing a virtual node returned by a list operation. +// An object that represents a virtual node returned by a list operation. type VirtualNodeRef struct { _ struct{} `type:"structure"` @@ -7663,7 +8122,7 @@ func (s *VirtualNodeRef) SetVirtualNodeName(v string) *VirtualNodeRef { return s } -// An object representing a virtual node service provider. +// An object that represents a virtual node service provider. type VirtualNodeServiceProvider struct { _ struct{} `type:"structure"` @@ -7703,7 +8162,7 @@ func (s *VirtualNodeServiceProvider) SetVirtualNodeName(v string) *VirtualNodeSe return s } -// An object representing the specification of a virtual node. +// An object that represents the specification of a virtual node. type VirtualNodeSpec struct { _ struct{} `type:"structure"` @@ -7711,10 +8170,11 @@ type VirtualNodeSpec struct { Listeners []*Listener `locationName:"listeners" type:"list"` - // An object representing the logging information for a virtual node. + // An object that represents the logging information for a virtual node. 
Logging *Logging `locationName:"logging" type:"structure"` - // An object representing the service discovery information for a virtual node. + // An object that represents the service discovery information for a virtual + // node. ServiceDiscovery *ServiceDiscovery `locationName:"serviceDiscovery" type:"structure"` } @@ -7792,7 +8252,7 @@ func (s *VirtualNodeSpec) SetServiceDiscovery(v *ServiceDiscovery) *VirtualNodeS return s } -// An object representing the current status of the virtual node. +// An object that represents the current status of the virtual node. type VirtualNodeStatus struct { _ struct{} `type:"structure"` @@ -7816,24 +8276,24 @@ func (s *VirtualNodeStatus) SetStatus(v string) *VirtualNodeStatus { return s } -// An object representing a virtual router returned by a describe operation. +// An object that represents a virtual router returned by a describe operation. type VirtualRouterData struct { _ struct{} `type:"structure"` // MeshName is a required field MeshName *string `locationName:"meshName" min:"1" type:"string" required:"true"` - // An object representing metadata for a resource. + // An object that represents metadata for a resource. // // Metadata is a required field Metadata *ResourceMetadata `locationName:"metadata" type:"structure" required:"true"` - // An object representing the specification of a virtual router. + // An object that represents the specification of a virtual router. // // Spec is a required field Spec *VirtualRouterSpec `locationName:"spec" type:"structure" required:"true"` - // An object representing the status of a virtual router. + // An object that represents the status of a virtual router. // // Status is a required field Status *VirtualRouterStatus `locationName:"status" type:"structure" required:"true"` @@ -7882,11 +8342,11 @@ func (s *VirtualRouterData) SetVirtualRouterName(v string) *VirtualRouterData { return s } -// An object representing a virtual router listener. +// An object that represents a virtual router listener. type VirtualRouterListener struct { _ struct{} `type:"structure"` - // An object representing a virtual node or virtual router listener port mapping. + // An object that represents a port mapping. // // PortMapping is a required field PortMapping *PortMapping `locationName:"portMapping" type:"structure" required:"true"` @@ -7926,7 +8386,7 @@ func (s *VirtualRouterListener) SetPortMapping(v *PortMapping) *VirtualRouterLis return s } -// An object representing a virtual router returned by a list operation. +// An object that represents a virtual router returned by a list operation. type VirtualRouterRef struct { _ struct{} `type:"structure"` @@ -7968,7 +8428,7 @@ func (s *VirtualRouterRef) SetVirtualRouterName(v string) *VirtualRouterRef { return s } -// An object representing a virtual node service provider. +// An object that represents a virtual node service provider. type VirtualRouterServiceProvider struct { _ struct{} `type:"structure"` @@ -8008,7 +8468,7 @@ func (s *VirtualRouterServiceProvider) SetVirtualRouterName(v string) *VirtualRo return s } -// An object representing the specification of a virtual router. +// An object that represents the specification of a virtual router. type VirtualRouterSpec struct { _ struct{} `type:"structure"` @@ -8054,7 +8514,7 @@ func (s *VirtualRouterSpec) SetListeners(v []*VirtualRouterListener) *VirtualRou return s } -// An object representing the status of a virtual router. +// An object that represents the status of a virtual router. 
type VirtualRouterStatus struct { _ struct{} `type:"structure"` @@ -8078,7 +8538,7 @@ func (s *VirtualRouterStatus) SetStatus(v string) *VirtualRouterStatus { return s } -// An object representing a virtual service backend for a virtual node. +// An object that represents a virtual service backend for a virtual node. type VirtualServiceBackend struct { _ struct{} `type:"structure"` @@ -8115,24 +8575,24 @@ func (s *VirtualServiceBackend) SetVirtualServiceName(v string) *VirtualServiceB return s } -// An object representing a virtual service returned by a describe operation. +// An object that represents a virtual service returned by a describe operation. type VirtualServiceData struct { _ struct{} `type:"structure"` // MeshName is a required field MeshName *string `locationName:"meshName" min:"1" type:"string" required:"true"` - // An object representing metadata for a resource. + // An object that represents metadata for a resource. // // Metadata is a required field Metadata *ResourceMetadata `locationName:"metadata" type:"structure" required:"true"` - // An object representing the specification of a virtual service. + // An object that represents the specification of a virtual service. // // Spec is a required field Spec *VirtualServiceSpec `locationName:"spec" type:"structure" required:"true"` - // An object representing the status of a virtual service. + // An object that represents the status of a virtual service. // // Status is a required field Status *VirtualServiceStatus `locationName:"status" type:"structure" required:"true"` @@ -8181,14 +8641,14 @@ func (s *VirtualServiceData) SetVirtualServiceName(v string) *VirtualServiceData return s } -// An object representing the provider for a virtual service. +// An object that represents the provider for a virtual service. type VirtualServiceProvider struct { _ struct{} `type:"structure"` - // An object representing a virtual node service provider. + // An object that represents a virtual node service provider. VirtualNode *VirtualNodeServiceProvider `locationName:"virtualNode" type:"structure"` - // An object representing a virtual node service provider. + // An object that represents a virtual node service provider. VirtualRouter *VirtualRouterServiceProvider `locationName:"virtualRouter" type:"structure"` } @@ -8234,7 +8694,7 @@ func (s *VirtualServiceProvider) SetVirtualRouter(v *VirtualRouterServiceProvide return s } -// An object representing a virtual service returned by a list operation. +// An object that represents a virtual service returned by a list operation. type VirtualServiceRef struct { _ struct{} `type:"structure"` @@ -8276,11 +8736,11 @@ func (s *VirtualServiceRef) SetVirtualServiceName(v string) *VirtualServiceRef { return s } -// An object representing the specification of a virtual service. +// An object that represents the specification of a virtual service. type VirtualServiceSpec struct { _ struct{} `type:"structure"` - // An object representing the provider for a virtual service. + // An object that represents the provider for a virtual service. Provider *VirtualServiceProvider `locationName:"provider" type:"structure"` } @@ -8315,7 +8775,7 @@ func (s *VirtualServiceSpec) SetProvider(v *VirtualServiceProvider) *VirtualServ return s } -// An object representing the status of a virtual service. +// An object that represents the status of a virtual service. 
type VirtualServiceStatus struct { _ struct{} `type:"structure"` @@ -8339,10 +8799,11 @@ func (s *VirtualServiceStatus) SetStatus(v string) *VirtualServiceStatus { return s } -// An object representing a target and its relative weight. Traffic is distributed +// An object that represents a target and its relative weight. Traffic is distributed // across targets according to their relative weight. For example, a weighted // target with a relative weight of 50 receives five times as much traffic as -// one with a relative weight of 10. +// one with a relative weight of 10. The total weight for all targets combined +// must be less than or equal to 100. type WeightedTarget struct { _ struct{} `type:"structure"` @@ -8410,6 +8871,23 @@ const ( EgressFilterTypeDropAll = "DROP_ALL" ) +const ( + // GrpcRetryPolicyEventCancelled is a GrpcRetryPolicyEvent enum value + GrpcRetryPolicyEventCancelled = "cancelled" + + // GrpcRetryPolicyEventDeadlineExceeded is a GrpcRetryPolicyEvent enum value + GrpcRetryPolicyEventDeadlineExceeded = "deadline-exceeded" + + // GrpcRetryPolicyEventInternal is a GrpcRetryPolicyEvent enum value + GrpcRetryPolicyEventInternal = "internal" + + // GrpcRetryPolicyEventResourceExhausted is a GrpcRetryPolicyEvent enum value + GrpcRetryPolicyEventResourceExhausted = "resource-exhausted" + + // GrpcRetryPolicyEventUnavailable is a GrpcRetryPolicyEvent enum value + GrpcRetryPolicyEventUnavailable = "unavailable" +) + const ( // HttpMethodConnect is a HttpMethod enum value HttpMethodConnect = "CONNECT" @@ -8459,9 +8937,15 @@ const ( ) const ( + // PortProtocolGrpc is a PortProtocol enum value + PortProtocolGrpc = "grpc" + // PortProtocolHttp is a PortProtocol enum value PortProtocolHttp = "http" + // PortProtocolHttp2 is a PortProtocol enum value + PortProtocolHttp2 = "http2" + // PortProtocolTcp is a PortProtocol enum value PortProtocolTcp = "tcp" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/appmesh/service.go b/vendor/github.com/aws/aws-sdk-go/service/appmesh/service.go index 88ea209b40e..962ada5cea9 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/appmesh/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/appmesh/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *AppMesh { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "appmesh" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *AppMesh { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *AppMesh { svc := &AppMesh{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2019-01-25", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/appstream/service.go b/vendor/github.com/aws/aws-sdk-go/service/appstream/service.go index 168cf44e0f7..51a7d38f3e6 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/appstream/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/appstream/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *AppStream { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "appstream" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *AppStream { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *AppStream { svc := &AppStream{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2016-12-01", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/appsync/service.go b/vendor/github.com/aws/aws-sdk-go/service/appsync/service.go index 6dae3d4cca7..e2003998c16 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/appsync/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/appsync/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *AppSync { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "appsync" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *AppSync { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *AppSync { svc := &AppSync{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-07-25", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/athena/service.go b/vendor/github.com/aws/aws-sdk-go/service/athena/service.go index 6806a62ec95..1f2eaa078b2 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/athena/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/athena/service.go @@ -46,11 +46,11 @@ const ( // svc := athena.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Athena { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Athena { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Athena { svc := &Athena{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-05-18", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/autoscaling/service.go b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/service.go index e1da9fd7546..4ce49ffd27d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/autoscaling/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/service.go @@ -46,11 +46,11 @@ const ( // svc := autoscaling.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *AutoScaling { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *AutoScaling { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *AutoScaling { svc := &AutoScaling{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2011-01-01", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/autoscalingplans/service.go b/vendor/github.com/aws/aws-sdk-go/service/autoscalingplans/service.go index dd6a1ece467..2c35b02391d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/autoscalingplans/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/autoscalingplans/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *AutoScalingPlans { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "autoscaling-plans" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *AutoScalingPlans { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *AutoScalingPlans { svc := &AutoScalingPlans{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-01-06", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/backup/service.go b/vendor/github.com/aws/aws-sdk-go/service/backup/service.go index 2f60e1c1f54..873f24a9f9d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/backup/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/backup/service.go @@ -46,11 +46,11 @@ const ( // svc := backup.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Backup { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Backup { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Backup { svc := &Backup{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-11-15", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/batch/api.go b/vendor/github.com/aws/aws-sdk-go/service/batch/api.go index 9cb0000b92f..d061b1533d1 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/batch/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/batch/api.go @@ -2141,19 +2141,35 @@ func (s *ComputeEnvironmentOrder) SetOrder(v int64) *ComputeEnvironmentOrder { type ComputeResource struct { _ struct{} `type:"structure"` + // The allocation strategy to use for the compute resource in case not enough + // instances of the best fitting instance type can be allocated. This could + // be due to availability of the instance type in the region or Amazon EC2 service + // limits (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-resource-limits.html). + // If this is not specified, the default is BEST_FIT, which will use only the + // best fitting instance type, waiting for additional capacity if it's not available. + // This allocation strategy keeps costs lower but can limit scaling. BEST_FIT_PROGRESSIVE + // will select an additional instance type that is large enough to meet the + // requirements of the jobs in the queue, with a preference for an instance + // type with a lower cost. SPOT_CAPACITY_OPTIMIZED is only available for Spot + // Instance compute resources and will select an additional instance type that + // is large enough to meet the requirements of the jobs in the queue, with a + // preference for an instance type that is less likely to be interrupted. + AllocationStrategy *string `locationName:"allocationStrategy" type:"string" enum:"CRAllocationStrategy"` + // The maximum percentage that a Spot Instance price can be when compared with // the On-Demand price for that instance type before instances are launched. // For example, if your maximum percentage is 20%, then the Spot price must - // be below 20% of the current On-Demand price for that EC2 instance. You always - // pay the lowest (market) price and never more than your maximum percentage. - // If you leave this field empty, the default value is 100% of the On-Demand - // price. + // be below 20% of the current On-Demand price for that Amazon EC2 instance. + // You always pay the lowest (market) price and never more than your maximum + // percentage. If you leave this field empty, the default value is 100% of the + // On-Demand price. BidPercentage *int64 `locationName:"bidPercentage" type:"integer"` - // The desired number of EC2 vCPUS in the compute environment. + // The desired number of Amazon EC2 vCPUS in the compute environment. DesiredvCpus *int64 `locationName:"desiredvCpus" type:"integer"` - // The EC2 key pair that is used for instances launched in the compute environment. + // The Amazon EC2 key pair that is used for instances launched in the compute + // environment. 
Ec2KeyPair *string `locationName:"ec2KeyPair" type:"string"` // The Amazon Machine Image (AMI) ID used for instances launched in the compute @@ -2170,8 +2186,8 @@ type ComputeResource struct { InstanceRole *string `locationName:"instanceRole" type:"string" required:"true"` // The instances types that may be launched. You can specify instance families - // to launch any instance type within those families (for example, c4 or p3), - // or you can specify specific sizes within a family (such as c4.8xlarge). You + // to launch any instance type within those families (for example, c5 or p3), + // or you can specify specific sizes within a family (such as c5.8xlarge). You // can also choose optimal to pick instance types (from the C, M, and R instance // families) on the fly that match the demand of your job queues. // @@ -2186,13 +2202,13 @@ type ComputeResource struct { // in the AWS Batch User Guide. LaunchTemplate *LaunchTemplateSpecification `locationName:"launchTemplate" type:"structure"` - // The maximum number of EC2 vCPUs that an environment can reach. + // The maximum number of Amazon EC2 vCPUs that an environment can reach. // // MaxvCpus is a required field MaxvCpus *int64 `locationName:"maxvCpus" type:"integer" required:"true"` - // The minimum number of EC2 vCPUs that an environment should maintain (even - // if the compute environment is DISABLED). + // The minimum number of Amazon EC2 vCPUs that an environment should maintain + // (even if the compute environment is DISABLED). // // MinvCpus is a required field MinvCpus *int64 `locationName:"minvCpus" type:"integer" required:"true"` @@ -2206,8 +2222,11 @@ type ComputeResource struct { // in the Amazon EC2 User Guide for Linux Instances. PlacementGroup *string `locationName:"placementGroup" type:"string"` - // The EC2 security group that is associated with instances launched in the - // compute environment. + // The Amazon EC2 security groups associated with instances launched in the + // compute environment. One or more security groups must be specified, either + // in securityGroupIds or using a launch template referenced in launchTemplate. + // If security groups are specified using both securityGroupIds and launchTemplate, + // the values in securityGroupIds will be used. SecurityGroupIds []*string `locationName:"securityGroupIds" type:"list"` // The Amazon Resource Name (ARN) of the Amazon EC2 Spot Fleet IAM role applied @@ -2273,6 +2292,12 @@ func (s *ComputeResource) Validate() error { return nil } +// SetAllocationStrategy sets the AllocationStrategy field's value. +func (s *ComputeResource) SetAllocationStrategy(v string) *ComputeResource { + s.AllocationStrategy = &v + return s +} + // SetBidPercentage sets the BidPercentage field's value. func (s *ComputeResource) SetBidPercentage(v int64) *ComputeResource { s.BidPercentage = &v @@ -2368,13 +2393,13 @@ func (s *ComputeResource) SetType(v string) *ComputeResource { type ComputeResourceUpdate struct { _ struct{} `type:"structure"` - // The desired number of EC2 vCPUS in the compute environment. + // The desired number of Amazon EC2 vCPUS in the compute environment. DesiredvCpus *int64 `locationName:"desiredvCpus" type:"integer"` - // The maximum number of EC2 vCPUs that an environment can reach. + // The maximum number of Amazon EC2 vCPUs that an environment can reach. MaxvCpus *int64 `locationName:"maxvCpus" type:"integer"` - // The minimum number of EC2 vCPUs that an environment should maintain. 
+ // The minimum number of Amazon EC2 vCPUs that an environment should maintain. MinvCpus *int64 `locationName:"minvCpus" type:"integer"` } @@ -6063,6 +6088,17 @@ const ( CETypeUnmanaged = "UNMANAGED" ) +const ( + // CRAllocationStrategyBestFit is a CRAllocationStrategy enum value + CRAllocationStrategyBestFit = "BEST_FIT" + + // CRAllocationStrategyBestFitProgressive is a CRAllocationStrategy enum value + CRAllocationStrategyBestFitProgressive = "BEST_FIT_PROGRESSIVE" + + // CRAllocationStrategySpotCapacityOptimized is a CRAllocationStrategy enum value + CRAllocationStrategySpotCapacityOptimized = "SPOT_CAPACITY_OPTIMIZED" +) + const ( // CRTypeEc2 is a CRType enum value CRTypeEc2 = "EC2" diff --git a/vendor/github.com/aws/aws-sdk-go/service/batch/service.go b/vendor/github.com/aws/aws-sdk-go/service/batch/service.go index 6b10821b33f..e4f63ee8dc3 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/batch/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/batch/service.go @@ -46,11 +46,11 @@ const ( // svc := batch.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Batch { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Batch { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Batch { svc := &Batch{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2016-08-10", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/budgets/service.go b/vendor/github.com/aws/aws-sdk-go/service/budgets/service.go index dc10c2aaa1b..3d72b59e981 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/budgets/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/budgets/service.go @@ -46,11 +46,11 @@ const ( // svc := budgets.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Budgets { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Budgets { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Budgets { svc := &Budgets{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2016-10-20", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloud9/service.go b/vendor/github.com/aws/aws-sdk-go/service/cloud9/service.go index 8b9d30d5440..9f6d2871cfc 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloud9/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloud9/service.go @@ -46,11 +46,11 @@ const ( // svc := cloud9.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Cloud9 { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Cloud9 { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Cloud9 { svc := &Cloud9{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-09-23", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudformation/service.go b/vendor/github.com/aws/aws-sdk-go/service/cloudformation/service.go index 65df49a0cc8..ab37537cc1e 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudformation/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudformation/service.go @@ -46,11 +46,11 @@ const ( // svc := cloudformation.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudFormation { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *CloudFormation { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *CloudFormation { svc := &CloudFormation{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2010-05-15", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudfront/service.go b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/service.go index d692c8f030f..bfb1606aa4a 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudfront/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/service.go @@ -46,11 +46,11 @@ const ( // svc := cloudfront.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudFront { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *CloudFront { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *CloudFront { svc := &CloudFront{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2019-03-26", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudhsmv2/service.go b/vendor/github.com/aws/aws-sdk-go/service/cloudhsmv2/service.go index c86db6ae7b0..38657874267 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudhsmv2/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudhsmv2/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudHSMV2 { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "cloudhsm" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *CloudHSMV2 { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *CloudHSMV2 { svc := &CloudHSMV2{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-04-28", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudsearch/service.go b/vendor/github.com/aws/aws-sdk-go/service/cloudsearch/service.go index 850bc137051..76c21764817 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudsearch/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudsearch/service.go @@ -46,11 +46,11 @@ const ( // svc := cloudsearch.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudSearch { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *CloudSearch { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *CloudSearch { svc := &CloudSearch{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2013-01-01", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/service.go b/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/service.go index 5f1d4ddc41f..ee863344c95 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/service.go @@ -46,11 +46,11 @@ const ( // svc := cloudtrail.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudTrail { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *CloudTrail { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *CloudTrail { svc := &CloudTrail{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2013-11-01", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/api.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/api.go index 0cc1304030f..4fcd5618a7b 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/api.go @@ -2304,19 +2304,20 @@ func (c *CloudWatch) TagResourceRequest(input *TagResourceInput) (req *request.R // TagResource API operation for Amazon CloudWatch. // // Assigns one or more tags (key-value pairs) to the specified CloudWatch resource. +// Currently, the only CloudWatch resources that can be tagged are alarms. +// // Tags can help you organize and categorize your resources. You can also use // them to scope user permissions, by granting a user permission to access or -// change only resources with certain tag values. In CloudWatch, alarms can -// be tagged. +// change only resources with certain tag values. // // Tags don't have any semantic meaning to AWS and are interpreted strictly // as strings of characters. // -// You can use the TagResource action with a resource that already has tags. -// If you specify a new tag key for the resource, this tag is appended to the -// list of tags associated with the resource. If you specify a tag key that -// is already associated with the resource, the new tag value that you specify -// replaces the previous value for that tag. +// You can use the TagResource action with an alarm that already has tags. If +// you specify a new tag key for the alarm, this tag is appended to the list +// of tags associated with the alarm. If you specify a tag key that is already +// associated with the alarm, the new tag value that you specify replaces the +// previous value for that tag. // // You can associate as many as 50 tags with a resource. // @@ -5068,6 +5069,19 @@ type MetricDataQuery struct { // MetricStat but not both. MetricStat *MetricStat `type:"structure"` + // The granularity, in seconds, of the returned data points. For metrics with + // regular resolution, a period can be as short as one minute (60 seconds) and + // must be a multiple of 60. For high-resolution metrics that are collected + // at intervals of less than one minute, the period can be 1, 5, 10, 30, 60, + // or any multiple of 60. High-resolution metrics are those metrics stored by + // a PutMetricData operation that includes a StorageResolution of 1 second. + // + // Use this field only when you are performing a GetMetricData operation, and + // only when you are specifying the Expression field. Do not use this field + // with a PutMetricAlarm operation or when you are specifying a MetricStat in + // a GetMetricData operation. + Period *int64 `min:"1" type:"integer"` + // When used in GetMetricData, this option indicates whether to return the timestamps // and raw data values of this metric. 
If you are performing this call just // to do math expressions and do not also need the raw data returned, you can @@ -5101,6 +5115,9 @@ func (s *MetricDataQuery) Validate() error { if s.Id != nil && len(*s.Id) < 1 { invalidParams.Add(request.NewErrParamMinLen("Id", 1)) } + if s.Period != nil && *s.Period < 1 { + invalidParams.Add(request.NewErrParamMinValue("Period", 1)) + } if s.MetricStat != nil { if err := s.MetricStat.Validate(); err != nil { invalidParams.AddNested("MetricStat", err.(request.ErrInvalidParams)) @@ -5137,6 +5154,12 @@ func (s *MetricDataQuery) SetMetricStat(v *MetricStat) *MetricDataQuery { return s } +// SetPeriod sets the Period field's value. +func (s *MetricDataQuery) SetPeriod(v int64) *MetricDataQuery { + s.Period = &v + return s +} + // SetReturnData sets the ReturnData field's value. func (s *MetricDataQuery) SetReturnData(v bool) *MetricDataQuery { s.ReturnData = &v @@ -5400,7 +5423,25 @@ type MetricStat struct { // Metric is a required field Metric *Metric `type:"structure" required:"true"` - // The period, in seconds, to use when retrieving the metric. + // The granularity, in seconds, of the returned data points. For metrics with + // regular resolution, a period can be as short as one minute (60 seconds) and + // must be a multiple of 60. For high-resolution metrics that are collected + // at intervals of less than one minute, the period can be 1, 5, 10, 30, 60, + // or any multiple of 60. High-resolution metrics are those metrics stored by + // a PutMetricData call that includes a StorageResolution of 1 second. + // + // If the StartTime parameter specifies a time stamp that is greater than 3 + // hours ago, you must specify the period as follows or no data points in that + // time range is returned: + // + // * Start time between 3 hours and 15 days ago - Use a multiple of 60 seconds + // (1 minute). + // + // * Start time between 15 and 63 days ago - Use a multiple of 300 seconds + // (5 minutes). + // + // * Start time greater than 63 days ago - Use a multiple of 3600 seconds + // (1 hour). // // Period is a required field Period *int64 `min:"1" type:"integer" required:"true"` @@ -6509,14 +6550,13 @@ func (s *Tag) SetValue(v string) *Tag { type TagResourceInput struct { _ struct{} `type:"structure"` - // The ARN of the CloudWatch resource that you're adding tags to. For more information - // on ARN format, see Example ARNs (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-cloudwatch) - // in the Amazon Web Services General Reference. + // The ARN of the CloudWatch alarm that you're adding tags to. The ARN format + // is arn:aws:cloudwatch:Region:account-id:alarm:alarm-name // // ResourceARN is a required field ResourceARN *string `min:"1" type:"string" required:"true"` - // The list of key-value pairs to associate with the resource. + // The list of key-value pairs to associate with the alarm. // // Tags is a required field Tags []*Tag `type:"list" required:"true"` diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/service.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/service.go index 0d478662240..9b43ce1f0b3 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/service.go @@ -46,11 +46,11 @@ const ( // svc := cloudwatch.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudWatch { c := p.ClientConfig(EndpointsID, cfgs...) 
- return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *CloudWatch { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *CloudWatch { svc := &CloudWatch{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2010-08-01", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/service.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/service.go index 2a7f6969cd8..d20dca58bfa 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/service.go @@ -46,11 +46,11 @@ const ( // svc := cloudwatchevents.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudWatchEvents { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *CloudWatchEvents { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *CloudWatchEvents { svc := &CloudWatchEvents{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-10-07", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go index 8d5f929df84..59b3512359d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go @@ -46,11 +46,11 @@ const ( // svc := cloudwatchlogs.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudWatchLogs { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *CloudWatchLogs { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *CloudWatchLogs { svc := &CloudWatchLogs{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2014-03-28", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/codebuild/service.go b/vendor/github.com/aws/aws-sdk-go/service/codebuild/service.go index b9ff2b6f76e..d04b3dab2b4 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/codebuild/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/codebuild/service.go @@ -46,11 +46,11 @@ const ( // svc := codebuild.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *CodeBuild { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *CodeBuild { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *CodeBuild { svc := &CodeBuild{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2016-10-06", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/codecommit/service.go b/vendor/github.com/aws/aws-sdk-go/service/codecommit/service.go index c8cad394ab0..63bc31e314c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/codecommit/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/codecommit/service.go @@ -46,11 +46,11 @@ const ( // svc := codecommit.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *CodeCommit { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *CodeCommit { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *CodeCommit { svc := &CodeCommit{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-04-13", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/codedeploy/service.go b/vendor/github.com/aws/aws-sdk-go/service/codedeploy/service.go index dd1eaf41355..dbb6a1bd26c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/codedeploy/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/codedeploy/service.go @@ -46,11 +46,11 @@ const ( // svc := codedeploy.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *CodeDeploy { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *CodeDeploy { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *CodeDeploy { svc := &CodeDeploy{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2014-10-06", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/codepipeline/service.go b/vendor/github.com/aws/aws-sdk-go/service/codepipeline/service.go index 397a00f9eb2..0bc30449588 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/codepipeline/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/codepipeline/service.go @@ -46,11 +46,11 @@ const ( // svc := codepipeline.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *CodePipeline { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *CodePipeline { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *CodePipeline { svc := &CodePipeline{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-07-09", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/service.go b/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/service.go index 9ebae103f36..e5c2011c0a9 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/service.go @@ -46,11 +46,11 @@ const ( // svc := cognitoidentity.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *CognitoIdentity { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *CognitoIdentity { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *CognitoIdentity { svc := &CognitoIdentity{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2014-06-30", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/cognitoidentityprovider/service.go b/vendor/github.com/aws/aws-sdk-go/service/cognitoidentityprovider/service.go index 68efbd80b6e..2c512449a10 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cognitoidentityprovider/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cognitoidentityprovider/service.go @@ -46,11 +46,11 @@ const ( // svc := cognitoidentityprovider.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *CognitoIdentityProvider { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *CognitoIdentityProvider { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *CognitoIdentityProvider { svc := &CognitoIdentityProvider{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2016-04-18", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/configservice/service.go b/vendor/github.com/aws/aws-sdk-go/service/configservice/service.go index 2fdea95561f..3cd3cff327d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/configservice/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/configservice/service.go @@ -46,11 +46,11 @@ const ( // svc := configservice.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *ConfigService { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ConfigService { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ConfigService { svc := &ConfigService{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2014-11-12", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/costandusagereportservice/service.go b/vendor/github.com/aws/aws-sdk-go/service/costandusagereportservice/service.go index 39e3cedfeae..dca54fd5c95 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/costandusagereportservice/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/costandusagereportservice/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *CostandUsageReportServic if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "cur" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *CostandUsageReportService { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *CostandUsageReportService { svc := &CostandUsageReportService{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-01-06", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/databasemigrationservice/service.go b/vendor/github.com/aws/aws-sdk-go/service/databasemigrationservice/service.go index 8ee775e03ca..ff3e35d659c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/databasemigrationservice/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/databasemigrationservice/service.go @@ -46,11 +46,11 @@ const ( // svc := databasemigrationservice.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *DatabaseMigrationService { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *DatabaseMigrationService { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *DatabaseMigrationService { svc := &DatabaseMigrationService{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2016-01-01", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/datapipeline/service.go b/vendor/github.com/aws/aws-sdk-go/service/datapipeline/service.go index ebd5c29b2fc..8f48358db5b 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/datapipeline/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/datapipeline/service.go @@ -46,11 +46,11 @@ const ( // svc := datapipeline.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *DataPipeline { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *DataPipeline { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *DataPipeline { svc := &DataPipeline{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2012-10-29", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/datasync/service.go b/vendor/github.com/aws/aws-sdk-go/service/datasync/service.go index 22d8ddcb90c..9f883b9b883 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/datasync/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/datasync/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *DataSync { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "datasync" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *DataSync { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *DataSync { svc := &DataSync{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-11-09", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/dax/service.go b/vendor/github.com/aws/aws-sdk-go/service/dax/service.go index 545ea0312c3..9a7d90302f3 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/dax/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/dax/service.go @@ -46,11 +46,11 @@ const ( // svc := dax.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *DAX { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *DAX { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *DAX { svc := &DAX{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-04-19", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/devicefarm/service.go b/vendor/github.com/aws/aws-sdk-go/service/devicefarm/service.go index 0f2354a4e1c..4e10bf876dc 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/devicefarm/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/devicefarm/service.go @@ -46,11 +46,11 @@ const ( // svc := devicefarm.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *DeviceFarm { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *DeviceFarm { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *DeviceFarm { svc := &DeviceFarm{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-06-23", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/directconnect/service.go b/vendor/github.com/aws/aws-sdk-go/service/directconnect/service.go index bb182821c59..be5ad460a9f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/directconnect/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/directconnect/service.go @@ -46,11 +46,11 @@ const ( // svc := directconnect.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *DirectConnect { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *DirectConnect { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *DirectConnect { svc := &DirectConnect{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2012-10-25", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/directoryservice/service.go b/vendor/github.com/aws/aws-sdk-go/service/directoryservice/service.go index 0743c9632ca..5c4c03db34f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/directoryservice/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/directoryservice/service.go @@ -46,11 +46,11 @@ const ( // svc := directoryservice.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *DirectoryService { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *DirectoryService { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *DirectoryService { svc := &DirectoryService{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-04-16", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/dlm/service.go b/vendor/github.com/aws/aws-sdk-go/service/dlm/service.go index b062fe30d63..ada01c825d7 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/dlm/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/dlm/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *DLM { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "dlm" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *DLM { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *DLM { svc := &DLM{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-01-12", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/docdb/service.go b/vendor/github.com/aws/aws-sdk-go/service/docdb/service.go index cd0f3d91dc5..c891f31c420 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/docdb/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/docdb/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *DocDB { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "rds" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *DocDB { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *DocDB { svc := &DocDB{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2014-10-31", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go index edcb5b8598e..0400da631df 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go @@ -48,11 +48,11 @@ const ( // svc := dynamodb.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *DynamoDB { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *DynamoDB { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *DynamoDB { svc := &DynamoDB{ Client: client.New( cfg, @@ -61,6 +61,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2012-08-10", JSONVersion: "1.0", diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go index 8c69b86d27d..3df136f884d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go @@ -29541,16 +29541,24 @@ func (c *EC2) RegisterImageRequest(input *RegisterImageInput) (req *request.Requ // You can't register an image where a secondary (non-root) snapshot has AWS // Marketplace product codes. // -// Some Linux distributions, such as Red Hat Enterprise Linux (RHEL) and SUSE -// Linux Enterprise Server (SLES), use the EC2 billing product code associated -// with an AMI to verify the subscription status for package updates. Creating -// an AMI from an EBS snapshot does not maintain this billing code, and instances -// launched from such an AMI are not able to connect to package update infrastructure. -// If you purchase a Reserved Instance offering for one of these Linux distributions -// and launch instances using an AMI that does not contain the required billing -// code, your Reserved Instance is not applied to these instances. +// Windows and some Linux distributions, such as Red Hat Enterprise Linux (RHEL) +// and SUSE Linux Enterprise Server (SLES), use the EC2 billing product code +// associated with an AMI to verify the subscription status for package updates. +// To create a new AMI for operating systems that require a billing product +// code, do the following: // -// To create an AMI for operating systems that require a billing code, see CreateImage. +// Launch an instance from an existing AMI with that billing product code. +// +// Customize the instance. +// +// Create a new AMI from the instance using CreateImage to preserve the billing +// product code association. +// +// If you purchase a Reserved Instance to apply to an On-Demand Instance that +// was launched from an AMI with a billing product code, make sure that the +// Reserved Instance has the matching billing product code. If you purchase +// a Reserved Instance without the matching billing product code, the Reserved +// Instance will not be applied to the On-Demand Instance. // // If needed, you can deregister an AMI at any time. Any modifications you make // to an AMI backed by an instance store volume invalidates its registration. @@ -40765,6 +40773,9 @@ type CreateFpgaImageInput struct { // A name for the AFI. Name *string `type:"string"` + + // The tags to apply to the FPGA image during creation. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` } // String returns the string representation @@ -40826,6 +40837,12 @@ func (s *CreateFpgaImageInput) SetName(v string) *CreateFpgaImageInput { return s } +// SetTagSpecifications sets the TagSpecifications field's value. 
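// A minimal usage sketch of the new TagSpecifications field on CreateFpgaImageInput
// (the client construction, bucket, key, and tag values below are illustrative
// placeholders, not part of the SDK change itself):
//
//    svc := ec2.New(session.Must(session.NewSession()))
//    out, err := svc.CreateFpgaImage(&ec2.CreateFpgaImageInput{
//        InputStorageLocation: &ec2.StorageLocation{
//            Bucket: aws.String("my-afi-bucket"),
//            Key:    aws.String("dcp/my-design.tar"),
//        },
//        TagSpecifications: []*ec2.TagSpecification{{
//            ResourceType: aws.String("fpga-image"),
//            Tags:         []*ec2.Tag{{Key: aws.String("Name"), Value: aws.String("my-afi")}},
//        }},
//    })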
+func (s *CreateFpgaImageInput) SetTagSpecifications(v []*TagSpecification) *CreateFpgaImageInput { + s.TagSpecifications = v + return s +} + type CreateFpgaImageOutput struct { _ struct{} `type:"structure"` @@ -90564,10 +90581,11 @@ type TagSpecification struct { // The type of resource to tag. Currently, the resource types that support tagging // on creation are: capacity-reservation | client-vpn-endpoint | dedicated-host - // | fleet | instance | launch-template | snapshot | transit-gateway | transit-gateway-attachment + // | fleet | fpga-image | instance | launch-template | snapshot | traffic-mirror-filter + // | traffic-mirror-session | traffic-mirror-target | transit-gateway | transit-gateway-attachment // | transit-gateway-route-table | volume. // - // To tag a resource after it has been created, see CreateTags. + // To tag a resource after it has been created, see CreateTags (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateTags.html). ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"` // The tags to apply to the resource. diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/service.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/service.go index 6acbc43fe3d..b2b9fb8c564 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/service.go @@ -46,11 +46,11 @@ const ( // svc := ec2.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2 { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *EC2 { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *EC2 { svc := &EC2{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2016-11-15", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go b/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go index 237899c6061..74ca7bb8e2e 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go @@ -442,7 +442,10 @@ func (c *ECR) CreateRepositoryRequest(input *CreateRepositoryInput) (req *reques // CreateRepository API operation for Amazon EC2 Container Registry. // -// Creates an image repository. +// Creates an Amazon Elastic Container Registry (Amazon ECR) repository, where +// users can push and pull Docker images. For more information, see Amazon ECR +// Repositories (https://docs.aws.amazon.com/AmazonECR/latest/userguide/Repositories.html) +// in the Amazon Elastic Container Registry User Guide. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -772,6 +775,156 @@ func (c *ECR) DeleteRepositoryPolicyWithContext(ctx aws.Context, input *DeleteRe return out, req.Send() } +const opDescribeImageScanFindings = "DescribeImageScanFindings" + +// DescribeImageScanFindingsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeImageScanFindings operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeImageScanFindings for more information on using the DescribeImageScanFindings +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeImageScanFindingsRequest method. +// req, resp := client.DescribeImageScanFindingsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DescribeImageScanFindings +func (c *ECR) DescribeImageScanFindingsRequest(input *DescribeImageScanFindingsInput) (req *request.Request, output *DescribeImageScanFindingsOutput) { + op := &request.Operation{ + Name: opDescribeImageScanFindings, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeImageScanFindingsInput{} + } + + output = &DescribeImageScanFindingsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeImageScanFindings API operation for Amazon EC2 Container Registry. +// +// Describes the image scan findings for the specified image. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation DescribeImageScanFindings for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServerException "ServerException" +// These errors are usually caused by a server-side issue. +// +// * ErrCodeInvalidParameterException "InvalidParameterException" +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException" +// The specified repository could not be found. Check the spelling of the specified +// repository and ensure that you are performing operations on the correct registry. +// +// * ErrCodeImageNotFoundException "ImageNotFoundException" +// The image requested does not exist in the specified repository. +// +// * ErrCodeScanNotFoundException "ScanNotFoundException" +// The specified image scan could not be found. Ensure that image scanning is +// enabled on the repository and try again. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DescribeImageScanFindings +func (c *ECR) DescribeImageScanFindings(input *DescribeImageScanFindingsInput) (*DescribeImageScanFindingsOutput, error) { + req, out := c.DescribeImageScanFindingsRequest(input) + return out, req.Send() +} + +// DescribeImageScanFindingsWithContext is the same as DescribeImageScanFindings with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeImageScanFindings for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) DescribeImageScanFindingsWithContext(ctx aws.Context, input *DescribeImageScanFindingsInput, opts ...request.Option) (*DescribeImageScanFindingsOutput, error) { + req, out := c.DescribeImageScanFindingsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeImageScanFindingsPages iterates over the pages of a DescribeImageScanFindings operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeImageScanFindings method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeImageScanFindings operation. +// pageNum := 0 +// err := client.DescribeImageScanFindingsPages(params, +// func(page *ecr.DescribeImageScanFindingsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ECR) DescribeImageScanFindingsPages(input *DescribeImageScanFindingsInput, fn func(*DescribeImageScanFindingsOutput, bool) bool) error { + return c.DescribeImageScanFindingsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeImageScanFindingsPagesWithContext same as DescribeImageScanFindingsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) DescribeImageScanFindingsPagesWithContext(ctx aws.Context, input *DescribeImageScanFindingsInput, fn func(*DescribeImageScanFindingsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeImageScanFindingsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeImageScanFindingsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*DescribeImageScanFindingsOutput), !p.HasNextPage()) + } + return p.Err() +} + const opDescribeImages = "DescribeImages" // DescribeImagesRequest generates a "aws/request.Request" representing the @@ -1378,6 +1531,12 @@ func (c *ECR) GetLifecyclePolicyPreviewRequest(input *GetLifecyclePolicyPreviewI Name: opGetLifecyclePolicyPreview, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { @@ -1437,6 +1596,56 @@ func (c *ECR) GetLifecyclePolicyPreviewWithContext(ctx aws.Context, input *GetLi return out, req.Send() } +// GetLifecyclePolicyPreviewPages iterates over the pages of a GetLifecyclePolicyPreview operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetLifecyclePolicyPreview method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetLifecyclePolicyPreview operation. +// pageNum := 0 +// err := client.GetLifecyclePolicyPreviewPages(params, +// func(page *ecr.GetLifecyclePolicyPreviewOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ECR) GetLifecyclePolicyPreviewPages(input *GetLifecyclePolicyPreviewInput, fn func(*GetLifecyclePolicyPreviewOutput, bool) bool) error { + return c.GetLifecyclePolicyPreviewPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// GetLifecyclePolicyPreviewPagesWithContext same as GetLifecyclePolicyPreviewPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) GetLifecyclePolicyPreviewPagesWithContext(ctx aws.Context, input *GetLifecyclePolicyPreviewInput, fn func(*GetLifecyclePolicyPreviewOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *GetLifecyclePolicyPreviewInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetLifecyclePolicyPreviewRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*GetLifecyclePolicyPreviewOutput), !p.HasNextPage()) + } + return p.Err() +} + const opGetRepositoryPolicy = "GetRepositoryPolicy" // GetRepositoryPolicyRequest generates a "aws/request.Request" representing the @@ -1964,6 +2173,93 @@ func (c *ECR) PutImageWithContext(ctx aws.Context, input *PutImageInput, opts .. return out, req.Send() } +const opPutImageScanningConfiguration = "PutImageScanningConfiguration" + +// PutImageScanningConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutImageScanningConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See PutImageScanningConfiguration for more information on using the PutImageScanningConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutImageScanningConfigurationRequest method. +// req, resp := client.PutImageScanningConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutImageScanningConfiguration +func (c *ECR) PutImageScanningConfigurationRequest(input *PutImageScanningConfigurationInput) (req *request.Request, output *PutImageScanningConfigurationOutput) { + op := &request.Operation{ + Name: opPutImageScanningConfiguration, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutImageScanningConfigurationInput{} + } + + output = &PutImageScanningConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutImageScanningConfiguration API operation for Amazon EC2 Container Registry. +// +// Updates the image scanning configuration for a repository. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation PutImageScanningConfiguration for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServerException "ServerException" +// These errors are usually caused by a server-side issue. +// +// * ErrCodeInvalidParameterException "InvalidParameterException" +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException" +// The specified repository could not be found. Check the spelling of the specified +// repository and ensure that you are performing operations on the correct registry. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutImageScanningConfiguration +func (c *ECR) PutImageScanningConfiguration(input *PutImageScanningConfigurationInput) (*PutImageScanningConfigurationOutput, error) { + req, out := c.PutImageScanningConfigurationRequest(input) + return out, req.Send() +} + +// PutImageScanningConfigurationWithContext is the same as PutImageScanningConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutImageScanningConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) PutImageScanningConfigurationWithContext(ctx aws.Context, input *PutImageScanningConfigurationInput, opts ...request.Option) (*PutImageScanningConfigurationOutput, error) { + req, out := c.PutImageScanningConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opPutImageTagMutability = "PutImageTagMutability" // PutImageTagMutabilityRequest generates a "aws/request.Request" representing the @@ -2228,59 +2524,58 @@ func (c *ECR) SetRepositoryPolicyWithContext(ctx aws.Context, input *SetReposito return out, req.Send() } -const opStartLifecyclePolicyPreview = "StartLifecyclePolicyPreview" +const opStartImageScan = "StartImageScan" -// StartLifecyclePolicyPreviewRequest generates a "aws/request.Request" representing the -// client's request for the StartLifecyclePolicyPreview operation. The "output" return +// StartImageScanRequest generates a "aws/request.Request" representing the +// client's request for the StartImageScan operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See StartLifecyclePolicyPreview for more information on using the StartLifecyclePolicyPreview +// See StartImageScan for more information on using the StartImageScan // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the StartLifecyclePolicyPreviewRequest method. -// req, resp := client.StartLifecyclePolicyPreviewRequest(params) +// // Example sending a request using the StartImageScanRequest method. +// req, resp := client.StartImageScanRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/StartLifecyclePolicyPreview -func (c *ECR) StartLifecyclePolicyPreviewRequest(input *StartLifecyclePolicyPreviewInput) (req *request.Request, output *StartLifecyclePolicyPreviewOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/StartImageScan +func (c *ECR) StartImageScanRequest(input *StartImageScanInput) (req *request.Request, output *StartImageScanOutput) { op := &request.Operation{ - Name: opStartLifecyclePolicyPreview, + Name: opStartImageScan, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &StartLifecyclePolicyPreviewInput{} + input = &StartImageScanInput{} } - output = &StartLifecyclePolicyPreviewOutput{} + output = &StartImageScanOutput{} req = c.newRequest(op, input, output) return } -// StartLifecyclePolicyPreview API operation for Amazon EC2 Container Registry. +// StartImageScan API operation for Amazon EC2 Container Registry. // -// Starts a preview of the specified lifecycle policy. This allows you to see -// the results before creating the lifecycle policy. +// Starts an image vulnerability scan. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon EC2 Container Registry's -// API operation StartLifecyclePolicyPreview for usage and error information. +// API operation StartImageScan for usage and error information. // // Returned Error Codes: // * ErrCodeServerException "ServerException" @@ -2294,36 +2589,127 @@ func (c *ECR) StartLifecyclePolicyPreviewRequest(input *StartLifecyclePolicyPrev // The specified repository could not be found. 
Check the spelling of the specified // repository and ensure that you are performing operations on the correct registry. // -// * ErrCodeLifecyclePolicyNotFoundException "LifecyclePolicyNotFoundException" -// The lifecycle policy could not be found, and no policy is set to the repository. -// -// * ErrCodeLifecyclePolicyPreviewInProgressException "LifecyclePolicyPreviewInProgressException" -// The previous lifecycle policy preview request has not completed. Please try -// again later. +// * ErrCodeImageNotFoundException "ImageNotFoundException" +// The image requested does not exist in the specified repository. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/StartLifecyclePolicyPreview -func (c *ECR) StartLifecyclePolicyPreview(input *StartLifecyclePolicyPreviewInput) (*StartLifecyclePolicyPreviewOutput, error) { - req, out := c.StartLifecyclePolicyPreviewRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/StartImageScan +func (c *ECR) StartImageScan(input *StartImageScanInput) (*StartImageScanOutput, error) { + req, out := c.StartImageScanRequest(input) return out, req.Send() } -// StartLifecyclePolicyPreviewWithContext is the same as StartLifecyclePolicyPreview with the addition of +// StartImageScanWithContext is the same as StartImageScan with the addition of // the ability to pass a context and additional request options. // -// See StartLifecyclePolicyPreview for details on how to use this API operation. +// See StartImageScan for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ECR) StartLifecyclePolicyPreviewWithContext(ctx aws.Context, input *StartLifecyclePolicyPreviewInput, opts ...request.Option) (*StartLifecyclePolicyPreviewOutput, error) { - req, out := c.StartLifecyclePolicyPreviewRequest(input) +func (c *ECR) StartImageScanWithContext(ctx aws.Context, input *StartImageScanInput, opts ...request.Option) (*StartImageScanOutput, error) { + req, out := c.StartImageScanRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opTagResource = "TagResource" +const opStartLifecyclePolicyPreview = "StartLifecyclePolicyPreview" + +// StartLifecyclePolicyPreviewRequest generates a "aws/request.Request" representing the +// client's request for the StartLifecyclePolicyPreview operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartLifecyclePolicyPreview for more information on using the StartLifecyclePolicyPreview +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StartLifecyclePolicyPreviewRequest method. 
+// req, resp := client.StartLifecyclePolicyPreviewRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/StartLifecyclePolicyPreview +func (c *ECR) StartLifecyclePolicyPreviewRequest(input *StartLifecyclePolicyPreviewInput) (req *request.Request, output *StartLifecyclePolicyPreviewOutput) { + op := &request.Operation{ + Name: opStartLifecyclePolicyPreview, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartLifecyclePolicyPreviewInput{} + } + + output = &StartLifecyclePolicyPreviewOutput{} + req = c.newRequest(op, input, output) + return +} + +// StartLifecyclePolicyPreview API operation for Amazon EC2 Container Registry. +// +// Starts a preview of the specified lifecycle policy. This allows you to see +// the results before creating the lifecycle policy. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation StartLifecyclePolicyPreview for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServerException "ServerException" +// These errors are usually caused by a server-side issue. +// +// * ErrCodeInvalidParameterException "InvalidParameterException" +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException" +// The specified repository could not be found. Check the spelling of the specified +// repository and ensure that you are performing operations on the correct registry. +// +// * ErrCodeLifecyclePolicyNotFoundException "LifecyclePolicyNotFoundException" +// The lifecycle policy could not be found, and no policy is set to the repository. +// +// * ErrCodeLifecyclePolicyPreviewInProgressException "LifecyclePolicyPreviewInProgressException" +// The previous lifecycle policy preview request has not completed. Please try +// again later. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/StartLifecyclePolicyPreview +func (c *ECR) StartLifecyclePolicyPreview(input *StartLifecyclePolicyPreviewInput) (*StartLifecyclePolicyPreviewOutput, error) { + req, out := c.StartLifecyclePolicyPreviewRequest(input) + return out, req.Send() +} + +// StartLifecyclePolicyPreviewWithContext is the same as StartLifecyclePolicyPreview with the addition of +// the ability to pass a context and additional request options. +// +// See StartLifecyclePolicyPreview for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) StartLifecyclePolicyPreviewWithContext(ctx aws.Context, input *StartLifecyclePolicyPreviewInput, opts ...request.Option) (*StartLifecyclePolicyPreviewOutput, error) { + req, out := c.StartLifecyclePolicyPreviewRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opTagResource = "TagResource" // TagResourceRequest generates a "aws/request.Request" representing the // client's request for the TagResource operation. 
The "output" return @@ -2623,6 +3009,41 @@ func (c *ECR) UploadLayerPartWithContext(ctx aws.Context, input *UploadLayerPart return out, req.Send() } +// This data type is used in the ImageScanFinding data type. +type Attribute struct { + _ struct{} `type:"structure"` + + // The attribute key. + // + // Key is a required field + Key *string `locationName:"key" min:"1" type:"string" required:"true"` + + // The value assigned to the attribute key. + Value *string `locationName:"value" min:"1" type:"string"` +} + +// String returns the string representation +func (s Attribute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Attribute) GoString() string { + return s.String() +} + +// SetKey sets the Key field's value. +func (s *Attribute) SetKey(v string) *Attribute { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Attribute) SetValue(v string) *Attribute { + s.Value = &v + return s +} + // An object representing authorization data for an Amazon ECR registry. type AuthorizationData struct { _ struct{} `type:"structure"` @@ -3146,6 +3567,11 @@ func (s *CompleteLayerUploadOutput) SetUploadId(v string) *CompleteLayerUploadOu type CreateRepositoryInput struct { _ struct{} `type:"structure"` + // The image scanning configuration for the repository. This setting determines + // whether images are scanned for known vulnerabilities after being pushed to + // the repository. + ImageScanningConfiguration *ImageScanningConfiguration `locationName:"imageScanningConfiguration" type:"structure"` + // The tag mutability setting for the repository. If this parameter is omitted, // the default setting of MUTABLE will be used which will allow image tags to // be overwritten. If IMMUTABLE is specified, all image tags within the repository @@ -3192,6 +3618,12 @@ func (s *CreateRepositoryInput) Validate() error { return nil } +// SetImageScanningConfiguration sets the ImageScanningConfiguration field's value. +func (s *CreateRepositoryInput) SetImageScanningConfiguration(v *ImageScanningConfiguration) *CreateRepositoryInput { + s.ImageScanningConfiguration = v + return s +} + // SetImageTagMutability sets the ImageTagMutability field's value. func (s *CreateRepositoryInput) SetImageTagMutability(v string) *CreateRepositoryInput { s.ImageTagMutability = &v @@ -3511,6 +3943,180 @@ func (s *DeleteRepositoryPolicyOutput) SetRepositoryName(v string) *DeleteReposi return s } +type DescribeImageScanFindingsInput struct { + _ struct{} `type:"structure"` + + // An object with identifying information for an Amazon ECR image. + // + // ImageId is a required field + ImageId *ImageIdentifier `locationName:"imageId" type:"structure" required:"true"` + + // The maximum number of image scan results returned by DescribeImageScanFindings + // in paginated output. When this parameter is used, DescribeImageScanFindings + // only returns maxResults results in a single page along with a nextToken response + // element. The remaining results of the initial request can be seen by sending + // another DescribeImageScanFindings request with the returned nextToken value. + // This value can be between 1 and 1000. If this parameter is not used, then + // DescribeImageScanFindings returns up to 100 results and a nextToken value, + // if applicable. 
+ MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + + // The nextToken value returned from a previous paginated DescribeImageScanFindings + // request where maxResults was used and the results exceeded the value of that + // parameter. Pagination continues from the end of the previous results that + // returned the nextToken value. This value is null when there are no more results + // to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The AWS account ID associated with the registry that contains the repository + // in which to describe the image scan findings for. If you do not specify a + // registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository for the image for which to describe the scan findings. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeImageScanFindingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImageScanFindingsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeImageScanFindingsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeImageScanFindingsInput"} + if s.ImageId == nil { + invalidParams.Add(request.NewErrParamRequired("ImageId")) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + if s.ImageId != nil { + if err := s.ImageId.Validate(); err != nil { + invalidParams.AddNested("ImageId", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetImageId sets the ImageId field's value. +func (s *DescribeImageScanFindingsInput) SetImageId(v *ImageIdentifier) *DescribeImageScanFindingsInput { + s.ImageId = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeImageScanFindingsInput) SetMaxResults(v int64) *DescribeImageScanFindingsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeImageScanFindingsInput) SetNextToken(v string) *DescribeImageScanFindingsInput { + s.NextToken = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *DescribeImageScanFindingsInput) SetRegistryId(v string) *DescribeImageScanFindingsInput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *DescribeImageScanFindingsInput) SetRepositoryName(v string) *DescribeImageScanFindingsInput { + s.RepositoryName = &v + return s +} + +type DescribeImageScanFindingsOutput struct { + _ struct{} `type:"structure"` + + // An object with identifying information for an Amazon ECR image. + ImageId *ImageIdentifier `locationName:"imageId" type:"structure"` + + // The information contained in the image scan findings. + ImageScanFindings *ImageScanFindings `locationName:"imageScanFindings" type:"structure"` + + // The current state of the scan. 
+ ImageScanStatus *ImageScanStatus `locationName:"imageScanStatus" type:"structure"` + + // The nextToken value to include in a future DescribeImageScanFindings request. + // When the results of a DescribeImageScanFindings request exceed maxResults, + // this value can be used to retrieve the next page of results. This value is + // null when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` +} + +// String returns the string representation +func (s DescribeImageScanFindingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImageScanFindingsOutput) GoString() string { + return s.String() +} + +// SetImageId sets the ImageId field's value. +func (s *DescribeImageScanFindingsOutput) SetImageId(v *ImageIdentifier) *DescribeImageScanFindingsOutput { + s.ImageId = v + return s +} + +// SetImageScanFindings sets the ImageScanFindings field's value. +func (s *DescribeImageScanFindingsOutput) SetImageScanFindings(v *ImageScanFindings) *DescribeImageScanFindingsOutput { + s.ImageScanFindings = v + return s +} + +// SetImageScanStatus sets the ImageScanStatus field's value. +func (s *DescribeImageScanFindingsOutput) SetImageScanStatus(v *ImageScanStatus) *DescribeImageScanFindingsOutput { + s.ImageScanStatus = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeImageScanFindingsOutput) SetNextToken(v string) *DescribeImageScanFindingsOutput { + s.NextToken = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *DescribeImageScanFindingsOutput) SetRegistryId(v string) *DescribeImageScanFindingsOutput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *DescribeImageScanFindingsOutput) SetRepositoryName(v string) *DescribeImageScanFindingsOutput { + s.RepositoryName = &v + return s +} + // An object representing a filter on a DescribeImages operation. type DescribeImagesFilter struct { _ struct{} `type:"structure"` @@ -4361,7 +4967,7 @@ type Image struct { ImageId *ImageIdentifier `locationName:"imageId" type:"structure"` // The image manifest associated with the image. - ImageManifest *string `locationName:"imageManifest" type:"string"` + ImageManifest *string `locationName:"imageManifest" min:"1" type:"string"` // The AWS account ID associated with the registry containing the image. RegistryId *string `locationName:"registryId" type:"string"` @@ -4415,6 +5021,12 @@ type ImageDetail struct { // the current image was pushed to the repository. ImagePushedAt *time.Time `locationName:"imagePushedAt" type:"timestamp"` + // A summary of the last completed image scan. + ImageScanFindingsSummary *ImageScanFindingsSummary `locationName:"imageScanFindingsSummary" type:"structure"` + + // The current state of the scan. + ImageScanStatus *ImageScanStatus `locationName:"imageScanStatus" type:"structure"` + // The size, in bytes, of the image in the repository. // // Beginning with Docker version 1.9, the Docker client compresses image layers @@ -4455,13 +5067,25 @@ func (s *ImageDetail) SetImagePushedAt(v time.Time) *ImageDetail { return s } -// SetImageSizeInBytes sets the ImageSizeInBytes field's value. 
-func (s *ImageDetail) SetImageSizeInBytes(v int64) *ImageDetail { - s.ImageSizeInBytes = &v +// SetImageScanFindingsSummary sets the ImageScanFindingsSummary field's value. +func (s *ImageDetail) SetImageScanFindingsSummary(v *ImageScanFindingsSummary) *ImageDetail { + s.ImageScanFindingsSummary = v return s } -// SetImageTags sets the ImageTags field's value. +// SetImageScanStatus sets the ImageScanStatus field's value. +func (s *ImageDetail) SetImageScanStatus(v *ImageScanStatus) *ImageDetail { + s.ImageScanStatus = v + return s +} + +// SetImageSizeInBytes sets the ImageSizeInBytes field's value. +func (s *ImageDetail) SetImageSizeInBytes(v int64) *ImageDetail { + s.ImageSizeInBytes = &v + return s +} + +// SetImageTags sets the ImageTags field's value. func (s *ImageDetail) SetImageTags(v []*string) *ImageDetail { s.ImageTags = v return s @@ -4567,6 +5191,220 @@ func (s *ImageIdentifier) SetImageTag(v string) *ImageIdentifier { return s } +// Contains information about an image scan finding. +type ImageScanFinding struct { + _ struct{} `type:"structure"` + + // A collection of attributes of the host from which the finding is generated. + Attributes []*Attribute `locationName:"attributes" type:"list"` + + // The description of the finding. + Description *string `locationName:"description" type:"string"` + + // The name associated with the finding, usually a CVE number. + Name *string `locationName:"name" type:"string"` + + // The finding severity. + Severity *string `locationName:"severity" type:"string" enum:"FindingSeverity"` + + // A link containing additional details about the security vulnerability. + Uri *string `locationName:"uri" type:"string"` +} + +// String returns the string representation +func (s ImageScanFinding) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImageScanFinding) GoString() string { + return s.String() +} + +// SetAttributes sets the Attributes field's value. +func (s *ImageScanFinding) SetAttributes(v []*Attribute) *ImageScanFinding { + s.Attributes = v + return s +} + +// SetDescription sets the Description field's value. +func (s *ImageScanFinding) SetDescription(v string) *ImageScanFinding { + s.Description = &v + return s +} + +// SetName sets the Name field's value. +func (s *ImageScanFinding) SetName(v string) *ImageScanFinding { + s.Name = &v + return s +} + +// SetSeverity sets the Severity field's value. +func (s *ImageScanFinding) SetSeverity(v string) *ImageScanFinding { + s.Severity = &v + return s +} + +// SetUri sets the Uri field's value. +func (s *ImageScanFinding) SetUri(v string) *ImageScanFinding { + s.Uri = &v + return s +} + +// The details of an image scan. +type ImageScanFindings struct { + _ struct{} `type:"structure"` + + // The image vulnerability counts, sorted by severity. + FindingSeverityCounts map[string]*int64 `locationName:"findingSeverityCounts" type:"map"` + + // The findings from the image scan. + Findings []*ImageScanFinding `locationName:"findings" type:"list"` + + // The time of the last completed image scan. + ImageScanCompletedAt *time.Time `locationName:"imageScanCompletedAt" type:"timestamp"` + + // The time when the vulnerability data was last scanned. 
+ VulnerabilitySourceUpdatedAt *time.Time `locationName:"vulnerabilitySourceUpdatedAt" type:"timestamp"` +} + +// String returns the string representation +func (s ImageScanFindings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImageScanFindings) GoString() string { + return s.String() +} + +// SetFindingSeverityCounts sets the FindingSeverityCounts field's value. +func (s *ImageScanFindings) SetFindingSeverityCounts(v map[string]*int64) *ImageScanFindings { + s.FindingSeverityCounts = v + return s +} + +// SetFindings sets the Findings field's value. +func (s *ImageScanFindings) SetFindings(v []*ImageScanFinding) *ImageScanFindings { + s.Findings = v + return s +} + +// SetImageScanCompletedAt sets the ImageScanCompletedAt field's value. +func (s *ImageScanFindings) SetImageScanCompletedAt(v time.Time) *ImageScanFindings { + s.ImageScanCompletedAt = &v + return s +} + +// SetVulnerabilitySourceUpdatedAt sets the VulnerabilitySourceUpdatedAt field's value. +func (s *ImageScanFindings) SetVulnerabilitySourceUpdatedAt(v time.Time) *ImageScanFindings { + s.VulnerabilitySourceUpdatedAt = &v + return s +} + +// A summary of the last completed image scan. +type ImageScanFindingsSummary struct { + _ struct{} `type:"structure"` + + // The image vulnerability counts, sorted by severity. + FindingSeverityCounts map[string]*int64 `locationName:"findingSeverityCounts" type:"map"` + + // The time of the last completed image scan. + ImageScanCompletedAt *time.Time `locationName:"imageScanCompletedAt" type:"timestamp"` + + // The time when the vulnerability data was last scanned. + VulnerabilitySourceUpdatedAt *time.Time `locationName:"vulnerabilitySourceUpdatedAt" type:"timestamp"` +} + +// String returns the string representation +func (s ImageScanFindingsSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImageScanFindingsSummary) GoString() string { + return s.String() +} + +// SetFindingSeverityCounts sets the FindingSeverityCounts field's value. +func (s *ImageScanFindingsSummary) SetFindingSeverityCounts(v map[string]*int64) *ImageScanFindingsSummary { + s.FindingSeverityCounts = v + return s +} + +// SetImageScanCompletedAt sets the ImageScanCompletedAt field's value. +func (s *ImageScanFindingsSummary) SetImageScanCompletedAt(v time.Time) *ImageScanFindingsSummary { + s.ImageScanCompletedAt = &v + return s +} + +// SetVulnerabilitySourceUpdatedAt sets the VulnerabilitySourceUpdatedAt field's value. +func (s *ImageScanFindingsSummary) SetVulnerabilitySourceUpdatedAt(v time.Time) *ImageScanFindingsSummary { + s.VulnerabilitySourceUpdatedAt = &v + return s +} + +// The current status of an image scan. +type ImageScanStatus struct { + _ struct{} `type:"structure"` + + // The description of the image scan status. + Description *string `locationName:"description" type:"string"` + + // The current state of an image scan. + Status *string `locationName:"status" type:"string" enum:"ScanStatus"` +} + +// String returns the string representation +func (s ImageScanStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImageScanStatus) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *ImageScanStatus) SetDescription(v string) *ImageScanStatus { + s.Description = &v + return s +} + +// SetStatus sets the Status field's value. 
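// A short sketch of how the scan findings above might be consumed, assuming "svc" is an
// already configured *ecr.ECR client; the repository name and image tag are placeholders:
//
//    out, err := svc.DescribeImageScanFindings(&ecr.DescribeImageScanFindingsInput{
//        RepositoryName: aws.String("my-repo"),
//        ImageId:        &ecr.ImageIdentifier{ImageTag: aws.String("latest")},
//    })
//    if err == nil && out.ImageScanFindings != nil {
//        for severity, count := range out.ImageScanFindings.FindingSeverityCounts {
//            fmt.Printf("%s: %d\n", severity, aws.Int64Value(count))
//        }
//    }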
+func (s *ImageScanStatus) SetStatus(v string) *ImageScanStatus { + s.Status = &v + return s +} + +// The image scanning configuration for a repository. +type ImageScanningConfiguration struct { + _ struct{} `type:"structure"` + + // The setting that determines whether images are scanned after being pushed + // to a repository. If set to true, images will be scanned after being pushed. + // If this parameter is not specified, it will default to false and images will + // not be scanned unless a scan is manually started with the StartImageScan + // API. + ScanOnPush *bool `locationName:"scanOnPush" type:"boolean"` +} + +// String returns the string representation +func (s ImageScanningConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImageScanningConfiguration) GoString() string { + return s.String() +} + +// SetScanOnPush sets the ScanOnPush field's value. +func (s *ImageScanningConfiguration) SetScanOnPush(v bool) *ImageScanningConfiguration { + s.ScanOnPush = &v + return s +} + type InitiateLayerUploadInput struct { _ struct{} `type:"structure"` @@ -5101,7 +5939,7 @@ type PutImageInput struct { // The image manifest corresponding to the image to be uploaded. // // ImageManifest is a required field - ImageManifest *string `locationName:"imageManifest" type:"string" required:"true"` + ImageManifest *string `locationName:"imageManifest" min:"1" type:"string" required:"true"` // The tag to associate with the image. This parameter is required for images // that use the Docker Image Manifest V2 Schema 2 or OCI formats. @@ -5134,6 +5972,9 @@ func (s *PutImageInput) Validate() error { if s.ImageManifest == nil { invalidParams.Add(request.NewErrParamRequired("ImageManifest")) } + if s.ImageManifest != nil && len(*s.ImageManifest) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ImageManifest", 1)) + } if s.ImageTag != nil && len(*s.ImageTag) < 1 { invalidParams.Add(request.NewErrParamMinLen("ImageTag", 1)) } @@ -5197,6 +6038,116 @@ func (s *PutImageOutput) SetImage(v *Image) *PutImageOutput { return s } +type PutImageScanningConfigurationInput struct { + _ struct{} `type:"structure"` + + // The image scanning configuration for the repository. This setting determines + // whether images are scanned for known vulnerabilities after being pushed to + // the repository. + // + // ImageScanningConfiguration is a required field + ImageScanningConfiguration *ImageScanningConfiguration `locationName:"imageScanningConfiguration" type:"structure" required:"true"` + + // The AWS account ID associated with the registry that contains the repository + // in which to update the image scanning configuration setting. If you do not + // specify a registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository in which to update the image scanning configuration + // setting. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutImageScanningConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutImageScanningConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
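// A minimal usage sketch of the scanning configuration described above, assuming the
// aws, session, and ecr packages are imported; the repository name and image tag are
// placeholders. Scan-on-push is enabled first, then a manual scan of an already-pushed
// image is started with StartImageScan:
//
//    svc := ecr.New(session.Must(session.NewSession()))
//    _, err := svc.PutImageScanningConfiguration(&ecr.PutImageScanningConfigurationInput{
//        RepositoryName: aws.String("my-repo"),
//        ImageScanningConfiguration: &ecr.ImageScanningConfiguration{
//            ScanOnPush: aws.Bool(true),
//        },
//    })
//    if err == nil {
//        _, err = svc.StartImageScan(&ecr.StartImageScanInput{
//            RepositoryName: aws.String("my-repo"),
//            ImageId:        &ecr.ImageIdentifier{ImageTag: aws.String("latest")},
//        })
//    }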
+func (s *PutImageScanningConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutImageScanningConfigurationInput"} + if s.ImageScanningConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("ImageScanningConfiguration")) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetImageScanningConfiguration sets the ImageScanningConfiguration field's value. +func (s *PutImageScanningConfigurationInput) SetImageScanningConfiguration(v *ImageScanningConfiguration) *PutImageScanningConfigurationInput { + s.ImageScanningConfiguration = v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *PutImageScanningConfigurationInput) SetRegistryId(v string) *PutImageScanningConfigurationInput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *PutImageScanningConfigurationInput) SetRepositoryName(v string) *PutImageScanningConfigurationInput { + s.RepositoryName = &v + return s +} + +type PutImageScanningConfigurationOutput struct { + _ struct{} `type:"structure"` + + // The image scanning configuration setting for the repository. + ImageScanningConfiguration *ImageScanningConfiguration `locationName:"imageScanningConfiguration" type:"structure"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` +} + +// String returns the string representation +func (s PutImageScanningConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutImageScanningConfigurationOutput) GoString() string { + return s.String() +} + +// SetImageScanningConfiguration sets the ImageScanningConfiguration field's value. +func (s *PutImageScanningConfigurationOutput) SetImageScanningConfiguration(v *ImageScanningConfiguration) *PutImageScanningConfigurationOutput { + s.ImageScanningConfiguration = v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *PutImageScanningConfigurationOutput) SetRegistryId(v string) *PutImageScanningConfigurationOutput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *PutImageScanningConfigurationOutput) SetRepositoryName(v string) *PutImageScanningConfigurationOutput { + s.RepositoryName = &v + return s +} + type PutImageTagMutabilityInput struct { _ struct{} `type:"structure"` @@ -5422,6 +6373,9 @@ type Repository struct { // The date and time, in JavaScript date format, when the repository was created. CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"` + // The image scanning configuration for a repository. + ImageScanningConfiguration *ImageScanningConfiguration `locationName:"imageScanningConfiguration" type:"structure"` + // The tag mutability setting for the repository. 
ImageTagMutability *string `locationName:"imageTagMutability" type:"string" enum:"ImageTagMutability"` @@ -5458,6 +6412,12 @@ func (s *Repository) SetCreatedAt(v time.Time) *Repository { return s } +// SetImageScanningConfiguration sets the ImageScanningConfiguration field's value. +func (s *Repository) SetImageScanningConfiguration(v *ImageScanningConfiguration) *Repository { + s.ImageScanningConfiguration = v + return s +} + // SetImageTagMutability sets the ImageTagMutability field's value. func (s *Repository) SetImageTagMutability(v string) *Repository { s.ImageTagMutability = &v @@ -5607,6 +6567,127 @@ func (s *SetRepositoryPolicyOutput) SetRepositoryName(v string) *SetRepositoryPo return s } +type StartImageScanInput struct { + _ struct{} `type:"structure"` + + // An object with identifying information for an Amazon ECR image. + // + // ImageId is a required field + ImageId *ImageIdentifier `locationName:"imageId" type:"structure" required:"true"` + + // The AWS account ID associated with the registry that contains the repository + // in which to start an image scan request. If you do not specify a registry, + // the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository that contains the images to scan. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s StartImageScanInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartImageScanInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StartImageScanInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartImageScanInput"} + if s.ImageId == nil { + invalidParams.Add(request.NewErrParamRequired("ImageId")) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + if s.ImageId != nil { + if err := s.ImageId.Validate(); err != nil { + invalidParams.AddNested("ImageId", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetImageId sets the ImageId field's value. +func (s *StartImageScanInput) SetImageId(v *ImageIdentifier) *StartImageScanInput { + s.ImageId = v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *StartImageScanInput) SetRegistryId(v string) *StartImageScanInput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *StartImageScanInput) SetRepositoryName(v string) *StartImageScanInput { + s.RepositoryName = &v + return s +} + +type StartImageScanOutput struct { + _ struct{} `type:"structure"` + + // An object with identifying information for an Amazon ECR image. + ImageId *ImageIdentifier `locationName:"imageId" type:"structure"` + + // The current state of the scan. + ImageScanStatus *ImageScanStatus `locationName:"imageScanStatus" type:"structure"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. 
+ RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` +} + +// String returns the string representation +func (s StartImageScanOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartImageScanOutput) GoString() string { + return s.String() +} + +// SetImageId sets the ImageId field's value. +func (s *StartImageScanOutput) SetImageId(v *ImageIdentifier) *StartImageScanOutput { + s.ImageId = v + return s +} + +// SetImageScanStatus sets the ImageScanStatus field's value. +func (s *StartImageScanOutput) SetImageScanStatus(v *ImageScanStatus) *StartImageScanOutput { + s.ImageScanStatus = v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *StartImageScanOutput) SetRegistryId(v string) *StartImageScanOutput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *StartImageScanOutput) SetRepositoryName(v string) *StartImageScanOutput { + s.RepositoryName = &v + return s +} + type StartLifecyclePolicyPreviewInput struct { _ struct{} `type:"structure"` @@ -6055,6 +7136,26 @@ func (s *UploadLayerPartOutput) SetUploadId(v string) *UploadLayerPartOutput { return s } +const ( + // FindingSeverityInformational is a FindingSeverity enum value + FindingSeverityInformational = "INFORMATIONAL" + + // FindingSeverityLow is a FindingSeverity enum value + FindingSeverityLow = "LOW" + + // FindingSeverityMedium is a FindingSeverity enum value + FindingSeverityMedium = "MEDIUM" + + // FindingSeverityHigh is a FindingSeverity enum value + FindingSeverityHigh = "HIGH" + + // FindingSeverityCritical is a FindingSeverity enum value + FindingSeverityCritical = "CRITICAL" + + // FindingSeverityUndefined is a FindingSeverity enum value + FindingSeverityUndefined = "UNDEFINED" +) + const ( // ImageActionTypeExpire is a ImageActionType enum value ImageActionTypeExpire = "EXPIRE" @@ -6115,6 +7216,17 @@ const ( LifecyclePolicyPreviewStatusFailed = "FAILED" ) +const ( + // ScanStatusInProgress is a ScanStatus enum value + ScanStatusInProgress = "IN_PROGRESS" + + // ScanStatusComplete is a ScanStatus enum value + ScanStatusComplete = "COMPLETE" + + // ScanStatusFailed is a ScanStatus enum value + ScanStatusFailed = "FAILED" +) + const ( // TagStatusTagged is a TagStatus enum value TagStatusTagged = "TAGGED" diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecr/errors.go b/vendor/github.com/aws/aws-sdk-go/service/ecr/errors.go index c1f18605ca6..786759af0fd 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ecr/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ecr/errors.go @@ -140,6 +140,13 @@ const ( // repository policy. ErrCodeRepositoryPolicyNotFoundException = "RepositoryPolicyNotFoundException" + // ErrCodeScanNotFoundException for service response error code + // "ScanNotFoundException". + // + // The specified image scan could not be found. Ensure that image scanning is + // enabled on the repository and try again. + ErrCodeScanNotFoundException = "ScanNotFoundException" + // ErrCodeServerException for service response error code // "ServerException". 
// diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecr/service.go b/vendor/github.com/aws/aws-sdk-go/service/ecr/service.go index 3eba7f696b6..b1ee8a2c723 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ecr/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ecr/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *ECR { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "ecr" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ECR { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ECR { svc := &ECR{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-09-21", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecs/service.go b/vendor/github.com/aws/aws-sdk-go/service/ecs/service.go index c268614ecb9..a2eef295918 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ecs/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ecs/service.go @@ -46,11 +46,11 @@ const ( // svc := ecs.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *ECS { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ECS { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ECS { svc := &ECS{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2014-11-13", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/efs/service.go b/vendor/github.com/aws/aws-sdk-go/service/efs/service.go index 6b1a11c900a..3b6336d6224 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/efs/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/efs/service.go @@ -46,11 +46,11 @@ const ( // svc := efs.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *EFS { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *EFS { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *EFS { svc := &EFS{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-02-01", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/eks/service.go b/vendor/github.com/aws/aws-sdk-go/service/eks/service.go index a51bf5458c6..9c3ae450660 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/eks/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/eks/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *EKS { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "eks" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *EKS { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *EKS { svc := &EKS{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-11-01", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticache/api.go b/vendor/github.com/aws/aws-sdk-go/service/elasticache/api.go index 6df29f91868..c2a35cf2c31 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elasticache/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elasticache/api.go @@ -377,6 +377,91 @@ func (c *ElastiCache) BatchStopUpdateActionWithContext(ctx aws.Context, input *B return out, req.Send() } +const opCompleteMigration = "CompleteMigration" + +// CompleteMigrationRequest generates a "aws/request.Request" representing the +// client's request for the CompleteMigration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CompleteMigration for more information on using the CompleteMigration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CompleteMigrationRequest method. 
+// req, resp := client.CompleteMigrationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CompleteMigration +func (c *ElastiCache) CompleteMigrationRequest(input *CompleteMigrationInput) (req *request.Request, output *CompleteMigrationOutput) { + op := &request.Operation{ + Name: opCompleteMigration, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CompleteMigrationInput{} + } + + output = &CompleteMigrationOutput{} + req = c.newRequest(op, input, output) + return +} + +// CompleteMigration API operation for Amazon ElastiCache. +// +// Complete the migration of data. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon ElastiCache's +// API operation CompleteMigration for usage and error information. +// +// Returned Error Codes: +// * ErrCodeReplicationGroupNotFoundFault "ReplicationGroupNotFoundFault" +// The specified replication group does not exist. +// +// * ErrCodeInvalidReplicationGroupStateFault "InvalidReplicationGroupState" +// The requested replication group is not in the available state. +// +// * ErrCodeReplicationGroupNotUnderMigrationFault "ReplicationGroupNotUnderMigrationFault" +// The designated replication group is not available for data migration. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CompleteMigration +func (c *ElastiCache) CompleteMigration(input *CompleteMigrationInput) (*CompleteMigrationOutput, error) { + req, out := c.CompleteMigrationRequest(input) + return out, req.Send() +} + +// CompleteMigrationWithContext is the same as CompleteMigration with the addition of +// the ability to pass a context and additional request options. +// +// See CompleteMigration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElastiCache) CompleteMigrationWithContext(ctx aws.Context, input *CompleteMigrationInput, opts ...request.Option) (*CompleteMigrationOutput, error) { + req, out := c.CompleteMigrationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCopySnapshot = "CopySnapshot" // CopySnapshotRequest generates a "aws/request.Request" representing the @@ -5264,6 +5349,94 @@ func (c *ElastiCache) RevokeCacheSecurityGroupIngressWithContext(ctx aws.Context return out, req.Send() } +const opStartMigration = "StartMigration" + +// StartMigrationRequest generates a "aws/request.Request" representing the +// client's request for the StartMigration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartMigration for more information on using the StartMigration +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StartMigrationRequest method. +// req, resp := client.StartMigrationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/StartMigration +func (c *ElastiCache) StartMigrationRequest(input *StartMigrationInput) (req *request.Request, output *StartMigrationOutput) { + op := &request.Operation{ + Name: opStartMigration, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartMigrationInput{} + } + + output = &StartMigrationOutput{} + req = c.newRequest(op, input, output) + return +} + +// StartMigration API operation for Amazon ElastiCache. +// +// Start the migration of data. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon ElastiCache's +// API operation StartMigration for usage and error information. +// +// Returned Error Codes: +// * ErrCodeReplicationGroupNotFoundFault "ReplicationGroupNotFoundFault" +// The specified replication group does not exist. +// +// * ErrCodeInvalidReplicationGroupStateFault "InvalidReplicationGroupState" +// The requested replication group is not in the available state. +// +// * ErrCodeReplicationGroupAlreadyUnderMigrationFault "ReplicationGroupAlreadyUnderMigrationFault" +// The targeted replication group is not available. +// +// * ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value for a parameter is invalid. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/StartMigration +func (c *ElastiCache) StartMigration(input *StartMigrationInput) (*StartMigrationOutput, error) { + req, out := c.StartMigrationRequest(input) + return out, req.Send() +} + +// StartMigrationWithContext is the same as StartMigration with the addition of +// the ability to pass a context and additional request options. +// +// See StartMigration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElastiCache) StartMigrationWithContext(ctx aws.Context, input *StartMigrationInput, opts ...request.Option) (*StartMigrationOutput, error) { + req, out := c.StartMigrationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opTestFailover = "TestFailover" // TestFailoverRequest generates a "aws/request.Request" representing the @@ -6769,6 +6942,78 @@ func (s *CacheSubnetGroup) SetVpcId(v string) *CacheSubnetGroup { return s } +type CompleteMigrationInput struct { + _ struct{} `type:"structure"` + + // Forces the migration to stop without ensuring that data is in sync. It is + // recommended to use this option only to abort the migration and not recommended + // when application wants to continue migration to ElastiCache. + Force *bool `type:"boolean"` + + // The ID of the replication group to which data is being migrated. 
+ // + // ReplicationGroupId is a required field + ReplicationGroupId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CompleteMigrationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteMigrationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CompleteMigrationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CompleteMigrationInput"} + if s.ReplicationGroupId == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationGroupId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetForce sets the Force field's value. +func (s *CompleteMigrationInput) SetForce(v bool) *CompleteMigrationInput { + s.Force = &v + return s +} + +// SetReplicationGroupId sets the ReplicationGroupId field's value. +func (s *CompleteMigrationInput) SetReplicationGroupId(v string) *CompleteMigrationInput { + s.ReplicationGroupId = &v + return s +} + +type CompleteMigrationOutput struct { + _ struct{} `type:"structure"` + + // Contains all of the attributes of a specific Redis replication group. + ReplicationGroup *ReplicationGroup `type:"structure"` +} + +// String returns the string representation +func (s CompleteMigrationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteMigrationOutput) GoString() string { + return s.String() +} + +// SetReplicationGroup sets the ReplicationGroup field's value. +func (s *CompleteMigrationOutput) SetReplicationGroup(v *ReplicationGroup) *CompleteMigrationOutput { + s.ReplicationGroup = v + return s +} + // Node group (shard) configuration options when adding or removing replicas. // Each node group (shard) configuration has the following members: NodeGroupId, // NewReplicaCount, and PreferredAvailabilityZones. @@ -8326,6 +8571,39 @@ func (s *CreateSnapshotOutput) SetSnapshot(v *Snapshot) *CreateSnapshotOutput { return s } +// The endpoint from which data should be migrated. +type CustomerNodeEndpoint struct { + _ struct{} `type:"structure"` + + // The address of the node endpoint + Address *string `type:"string"` + + // The port of the node endpoint + Port *int64 `type:"integer"` +} + +// String returns the string representation +func (s CustomerNodeEndpoint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CustomerNodeEndpoint) GoString() string { + return s.String() +} + +// SetAddress sets the Address field's value. +func (s *CustomerNodeEndpoint) SetAddress(v string) *CustomerNodeEndpoint { + s.Address = &v + return s +} + +// SetPort sets the Port field's value. +func (s *CustomerNodeEndpoint) SetPort(v int64) *CustomerNodeEndpoint { + s.Port = &v + return s +} + type DecreaseReplicaCountInput struct { _ struct{} `type:"structure"` @@ -14118,6 +14396,82 @@ func (s *Snapshot) SetVpcId(v string) *Snapshot { return s } +type StartMigrationInput struct { + _ struct{} `type:"structure"` + + // List of endpoints from which data should be migrated. For Redis (cluster + // mode disabled), list should have only one element. + // + // CustomerNodeEndpointList is a required field + CustomerNodeEndpointList []*CustomerNodeEndpoint `type:"list" required:"true"` + + // The ID of the replication group to which data should be migrated. 
+ // + // ReplicationGroupId is a required field + ReplicationGroupId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s StartMigrationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartMigrationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StartMigrationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartMigrationInput"} + if s.CustomerNodeEndpointList == nil { + invalidParams.Add(request.NewErrParamRequired("CustomerNodeEndpointList")) + } + if s.ReplicationGroupId == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationGroupId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCustomerNodeEndpointList sets the CustomerNodeEndpointList field's value. +func (s *StartMigrationInput) SetCustomerNodeEndpointList(v []*CustomerNodeEndpoint) *StartMigrationInput { + s.CustomerNodeEndpointList = v + return s +} + +// SetReplicationGroupId sets the ReplicationGroupId field's value. +func (s *StartMigrationInput) SetReplicationGroupId(v string) *StartMigrationInput { + s.ReplicationGroupId = &v + return s +} + +type StartMigrationOutput struct { + _ struct{} `type:"structure"` + + // Contains all of the attributes of a specific Redis replication group. + ReplicationGroup *ReplicationGroup `type:"structure"` +} + +// String returns the string representation +func (s StartMigrationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartMigrationOutput) GoString() string { + return s.String() +} + +// SetReplicationGroup sets the ReplicationGroup field's value. +func (s *StartMigrationOutput) SetReplicationGroup(v *ReplicationGroup) *StartMigrationOutput { + s.ReplicationGroup = v + return s +} + // Represents the subnet associated with a cluster. This parameter refers to // subnets defined in Amazon Virtual Private Cloud (Amazon VPC) and used with // ElastiCache. diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticache/errors.go b/vendor/github.com/aws/aws-sdk-go/service/elasticache/errors.go index 8f75570b996..25579b1d5d6 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elasticache/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elasticache/errors.go @@ -235,12 +235,24 @@ const ( // The specified replication group already exists. ErrCodeReplicationGroupAlreadyExistsFault = "ReplicationGroupAlreadyExists" + // ErrCodeReplicationGroupAlreadyUnderMigrationFault for service response error code + // "ReplicationGroupAlreadyUnderMigrationFault". + // + // The targeted replication group is not available. + ErrCodeReplicationGroupAlreadyUnderMigrationFault = "ReplicationGroupAlreadyUnderMigrationFault" + // ErrCodeReplicationGroupNotFoundFault for service response error code // "ReplicationGroupNotFoundFault". // // The specified replication group does not exist. ErrCodeReplicationGroupNotFoundFault = "ReplicationGroupNotFoundFault" + // ErrCodeReplicationGroupNotUnderMigrationFault for service response error code + // "ReplicationGroupNotUnderMigrationFault". + // + // The designated replication group is not available for data migration. 
+ ErrCodeReplicationGroupNotUnderMigrationFault = "ReplicationGroupNotUnderMigrationFault" + // ErrCodeReservedCacheNodeAlreadyExistsFault for service response error code // "ReservedCacheNodeAlreadyExists". // diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticache/service.go b/vendor/github.com/aws/aws-sdk-go/service/elasticache/service.go index fd5f8c51707..2ad929dca32 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elasticache/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elasticache/service.go @@ -46,11 +46,11 @@ const ( // svc := elasticache.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *ElastiCache { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ElastiCache { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ElastiCache { svc := &ElastiCache{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-02-02", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/service.go b/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/service.go index 12e8b1c819a..841587452ab 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/service.go @@ -46,11 +46,11 @@ const ( // svc := elasticbeanstalk.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *ElasticBeanstalk { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ElasticBeanstalk { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ElasticBeanstalk { svc := &ElasticBeanstalk{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2010-12-01", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/service.go b/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/service.go index d2f8f382733..9f309cdb009 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/service.go @@ -46,11 +46,11 @@ const ( // svc := elasticsearchservice.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *ElasticsearchService { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ElasticsearchService { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ElasticsearchService { svc := &ElasticsearchService{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-01-01", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/service.go b/vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/service.go index 30acb8d1bc0..9a33ddbf610 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/service.go @@ -46,11 +46,11 @@ const ( // svc := elastictranscoder.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *ElasticTranscoder { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ElasticTranscoder { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ElasticTranscoder { svc := &ElasticTranscoder{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2012-09-25", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/elb/service.go b/vendor/github.com/aws/aws-sdk-go/service/elb/service.go index 5dfdd322c9b..73e40b747c9 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elb/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elb/service.go @@ -46,11 +46,11 @@ const ( // svc := elb.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *ELB { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ELB { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ELB { svc := &ELB{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2012-06-01", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/elbv2/service.go b/vendor/github.com/aws/aws-sdk-go/service/elbv2/service.go index ad97e8df885..1fcdb5bf44c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elbv2/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elbv2/service.go @@ -46,11 +46,11 @@ const ( // svc := elbv2.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *ELBV2 { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ELBV2 { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ELBV2 { svc := &ELBV2{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-12-01", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/emr/service.go b/vendor/github.com/aws/aws-sdk-go/service/emr/service.go index 92735a793d4..40af82bfabc 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/emr/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/emr/service.go @@ -46,11 +46,11 @@ const ( // svc := emr.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *EMR { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *EMR { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *EMR { svc := &EMR{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2009-03-31", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/firehose/service.go b/vendor/github.com/aws/aws-sdk-go/service/firehose/service.go index bcdf23dffb9..6e234b3c9e6 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/firehose/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/firehose/service.go @@ -46,11 +46,11 @@ const ( // svc := firehose.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Firehose { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Firehose { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Firehose { svc := &Firehose{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-08-04", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/fms/service.go b/vendor/github.com/aws/aws-sdk-go/service/fms/service.go index 6103e57fd82..e39fa854236 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/fms/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/fms/service.go @@ -46,11 +46,11 @@ const ( // svc := fms.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *FMS { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *FMS { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *FMS { svc := &FMS{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-01-01", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/forecastservice/service.go b/vendor/github.com/aws/aws-sdk-go/service/forecastservice/service.go index 2f1cd5c6fd5..f7b4cda112f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/forecastservice/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/forecastservice/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *ForecastService { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "forecast" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ForecastService { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ForecastService { svc := &ForecastService{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-06-26", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/fsx/service.go b/vendor/github.com/aws/aws-sdk-go/service/fsx/service.go index 544a82e1b07..0afe7ddcb61 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/fsx/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/fsx/service.go @@ -46,11 +46,11 @@ const ( // svc := fsx.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *FSx { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *FSx { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *FSx { svc := &FSx{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-03-01", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/gamelift/api.go b/vendor/github.com/aws/aws-sdk-go/service/gamelift/api.go index 5fffd192979..c29096f3f1b 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/gamelift/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/gamelift/api.go @@ -20304,6 +20304,30 @@ const ( // EC2InstanceTypeC48xlarge is a EC2InstanceType enum value EC2InstanceTypeC48xlarge = "c4.8xlarge" + // EC2InstanceTypeC5Large is a EC2InstanceType enum value + EC2InstanceTypeC5Large = "c5.large" + + // EC2InstanceTypeC5Xlarge is a EC2InstanceType enum value + EC2InstanceTypeC5Xlarge = "c5.xlarge" + + // EC2InstanceTypeC52xlarge is a EC2InstanceType enum value + EC2InstanceTypeC52xlarge = "c5.2xlarge" + + // EC2InstanceTypeC54xlarge is a EC2InstanceType enum value + EC2InstanceTypeC54xlarge = "c5.4xlarge" + + // EC2InstanceTypeC59xlarge is a EC2InstanceType enum value + EC2InstanceTypeC59xlarge = "c5.9xlarge" + + // EC2InstanceTypeC512xlarge is a EC2InstanceType enum value + EC2InstanceTypeC512xlarge = "c5.12xlarge" + + // EC2InstanceTypeC518xlarge is a EC2InstanceType enum value + EC2InstanceTypeC518xlarge = "c5.18xlarge" + + // EC2InstanceTypeC524xlarge is a EC2InstanceType enum value + EC2InstanceTypeC524xlarge = "c5.24xlarge" + // EC2InstanceTypeR3Large is a EC2InstanceType enum value EC2InstanceTypeR3Large = "r3.large" @@ -20337,6 +20361,30 @@ const ( // EC2InstanceTypeR416xlarge is a EC2InstanceType enum value EC2InstanceTypeR416xlarge = "r4.16xlarge" + // EC2InstanceTypeR5Large is a EC2InstanceType enum value + EC2InstanceTypeR5Large = "r5.large" + + // EC2InstanceTypeR5Xlarge is a 
EC2InstanceType enum value + EC2InstanceTypeR5Xlarge = "r5.xlarge" + + // EC2InstanceTypeR52xlarge is a EC2InstanceType enum value + EC2InstanceTypeR52xlarge = "r5.2xlarge" + + // EC2InstanceTypeR54xlarge is a EC2InstanceType enum value + EC2InstanceTypeR54xlarge = "r5.4xlarge" + + // EC2InstanceTypeR58xlarge is a EC2InstanceType enum value + EC2InstanceTypeR58xlarge = "r5.8xlarge" + + // EC2InstanceTypeR512xlarge is a EC2InstanceType enum value + EC2InstanceTypeR512xlarge = "r5.12xlarge" + + // EC2InstanceTypeR516xlarge is a EC2InstanceType enum value + EC2InstanceTypeR516xlarge = "r5.16xlarge" + + // EC2InstanceTypeR524xlarge is a EC2InstanceType enum value + EC2InstanceTypeR524xlarge = "r5.24xlarge" + // EC2InstanceTypeM3Medium is a EC2InstanceType enum value EC2InstanceTypeM3Medium = "m3.medium" @@ -20363,6 +20411,30 @@ const ( // EC2InstanceTypeM410xlarge is a EC2InstanceType enum value EC2InstanceTypeM410xlarge = "m4.10xlarge" + + // EC2InstanceTypeM5Large is a EC2InstanceType enum value + EC2InstanceTypeM5Large = "m5.large" + + // EC2InstanceTypeM5Xlarge is a EC2InstanceType enum value + EC2InstanceTypeM5Xlarge = "m5.xlarge" + + // EC2InstanceTypeM52xlarge is a EC2InstanceType enum value + EC2InstanceTypeM52xlarge = "m5.2xlarge" + + // EC2InstanceTypeM54xlarge is a EC2InstanceType enum value + EC2InstanceTypeM54xlarge = "m5.4xlarge" + + // EC2InstanceTypeM58xlarge is a EC2InstanceType enum value + EC2InstanceTypeM58xlarge = "m5.8xlarge" + + // EC2InstanceTypeM512xlarge is a EC2InstanceType enum value + EC2InstanceTypeM512xlarge = "m5.12xlarge" + + // EC2InstanceTypeM516xlarge is a EC2InstanceType enum value + EC2InstanceTypeM516xlarge = "m5.16xlarge" + + // EC2InstanceTypeM524xlarge is a EC2InstanceType enum value + EC2InstanceTypeM524xlarge = "m5.24xlarge" ) const ( @@ -20633,6 +20705,9 @@ const ( // OperatingSystemAmazonLinux is a OperatingSystem enum value OperatingSystemAmazonLinux = "AMAZON_LINUX" + + // OperatingSystemAmazonLinux2 is a OperatingSystem enum value + OperatingSystemAmazonLinux2 = "AMAZON_LINUX_2" ) const ( diff --git a/vendor/github.com/aws/aws-sdk-go/service/gamelift/service.go b/vendor/github.com/aws/aws-sdk-go/service/gamelift/service.go index a2361e47690..2d5f27330a5 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/gamelift/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/gamelift/service.go @@ -46,11 +46,11 @@ const ( // svc := gamelift.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *GameLift { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *GameLift { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *GameLift { svc := &GameLift{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-10-01", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/glacier/service.go b/vendor/github.com/aws/aws-sdk-go/service/glacier/service.go index 85e6e367b20..b8e0cffc6cb 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/glacier/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/glacier/service.go @@ -46,11 +46,11 @@ const ( // svc := glacier.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Glacier { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Glacier { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Glacier { svc := &Glacier{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2012-06-01", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/globalaccelerator/service.go b/vendor/github.com/aws/aws-sdk-go/service/globalaccelerator/service.go index 31552ab8c29..7750c412f61 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/globalaccelerator/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/globalaccelerator/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *GlobalAccelerator { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "globalaccelerator" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *GlobalAccelerator { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *GlobalAccelerator { svc := &GlobalAccelerator{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-08-08", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/glue/service.go b/vendor/github.com/aws/aws-sdk-go/service/glue/service.go index 075b0a1df6c..25770968a17 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/glue/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/glue/service.go @@ -46,11 +46,11 @@ const ( // svc := glue.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Glue { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Glue { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Glue { svc := &Glue{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-03-31", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/guardduty/service.go b/vendor/github.com/aws/aws-sdk-go/service/guardduty/service.go index 1c9835a9161..eec5c772aed 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/guardduty/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/guardduty/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *GuardDuty { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "guardduty" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *GuardDuty { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *GuardDuty { svc := &GuardDuty{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-11-28", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/iam/service.go b/vendor/github.com/aws/aws-sdk-go/service/iam/service.go index 940b4ce3283..62228c482e3 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/iam/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/iam/service.go @@ -46,11 +46,11 @@ const ( // svc := iam.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *IAM { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *IAM { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *IAM { svc := &IAM{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2010-05-08", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/inspector/service.go b/vendor/github.com/aws/aws-sdk-go/service/inspector/service.go index 2e68b4e4d23..aae2fa86571 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/inspector/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/inspector/service.go @@ -46,11 +46,11 @@ const ( // svc := inspector.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Inspector { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Inspector { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Inspector { svc := &Inspector{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2016-02-16", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/iot/service.go b/vendor/github.com/aws/aws-sdk-go/service/iot/service.go index 10a95d5607c..0391dde5474 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/iot/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/iot/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *IoT { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "execute-api" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *IoT { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *IoT { svc := &IoT{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-05-28", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/iotanalytics/service.go b/vendor/github.com/aws/aws-sdk-go/service/iotanalytics/service.go index 54edbd56f34..97b51926b97 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/iotanalytics/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/iotanalytics/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *IoTAnalytics { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "iotanalytics" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *IoTAnalytics { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *IoTAnalytics { svc := &IoTAnalytics{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-11-27", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/iotevents/api.go b/vendor/github.com/aws/aws-sdk-go/service/iotevents/api.go index 551a03fef98..954cf1a17dc 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/iotevents/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/iotevents/api.go @@ -1557,7 +1557,7 @@ type ActionData struct { ClearTimer *ClearTimerAction `locationName:"clearTimer" type:"structure"` // Sends information about the detector model instance and the event which triggered - // the action to a Kinesis Data Firehose stream. + // the action to a Kinesis Data Firehose delivery stream. Firehose *FirehoseAction `locationName:"firehose" type:"structure"` // Sends an IoT Events input, passing in information about the detector model @@ -1567,7 +1567,7 @@ type ActionData struct { // Publishes an MQTT message with the given topic to the AWS IoT message broker. IotTopicPublish *IotTopicPublishAction `locationName:"iotTopicPublish" type:"structure"` - // Calls a Lambda function, passing in information about the detector model + // Calls an AWS Lambda function, passing in information about the detector model // instance and the event which triggered the action. Lambda *LambdaAction `locationName:"lambda" type:"structure"` @@ -1584,7 +1584,7 @@ type ActionData struct { Sns *SNSTopicPublishAction `locationName:"sns" type:"structure"` // Sends information about the detector model instance and the event which triggered - // the action to an AWS SQS queue. + // the action to an Amazon SQS queue. Sqs *SqsAction `locationName:"sqs" type:"structure"` } @@ -1829,11 +1829,16 @@ type CreateDetectorModelInput struct { // DetectorModelName is a required field DetectorModelName *string `locationName:"detectorModelName" min:"1" type:"string" required:"true"` - // The input attribute key used to identify a device or system in order to create - // a detector (an instance of the detector model) and then to route each input - // received to the appropriate detector (instance). This parameter uses a JSON-path - // expression to specify the attribute-value pair in the message payload of - // each input that is used to identify the device associated with the input. + // When set to SERIAL, variables are updated and event conditions evaluated + // in the order that the events are defined. When set to BATCH, variables are + // updated and events performed only after all event conditions are evaluated. + EvaluationMethod *string `locationName:"evaluationMethod" type:"string" enum:"EvaluationMethod"` + + // The input attribute key used to identify a device or system to create a detector + // (an instance of the detector model) and then to route each input received + // to the appropriate detector (instance). This parameter uses a JSON-path expression + // to specify the attribute-value pair in the message payload of each input + // that is used to identify the device associated with the input. 
Key *string `locationName:"key" min:"1" type:"string"` // The ARN of the role that grants permission to AWS IoT Events to perform its @@ -1917,6 +1922,12 @@ func (s *CreateDetectorModelInput) SetDetectorModelName(v string) *CreateDetecto return s } +// SetEvaluationMethod sets the EvaluationMethod field's value. +func (s *CreateDetectorModelInput) SetEvaluationMethod(v string) *CreateDetectorModelInput { + s.EvaluationMethod = &v + return s +} + // SetKey sets the Key field's value. func (s *CreateDetectorModelInput) SetKey(v string) *CreateDetectorModelInput { s.Key = &v @@ -2464,11 +2475,16 @@ type DetectorModelConfiguration struct { // The version of the detector model. DetectorModelVersion *string `locationName:"detectorModelVersion" min:"1" type:"string"` - // The input attribute key used to identify a device or system in order to create - // a detector (an instance of the detector model) and then to route each input - // received to the appropriate detector (instance). This parameter uses a JSON-path - // expression to specify the attribute-value pair in the message payload of - // each input that is used to identify the device associated with the input. + // When set to SERIAL, variables are updated and event conditions evaluated + // in the order that the events are defined. When set to BATCH, variables are + // updated and events performed only after all event conditions are evaluated. + EvaluationMethod *string `locationName:"evaluationMethod" type:"string" enum:"EvaluationMethod"` + + // The input attribute key used to identify a device or system to create a detector + // (an instance of the detector model) and then to route each input received + // to the appropriate detector (instance). This parameter uses a JSON-path expression + // to specify the attribute-value pair in the message payload of each input + // that is used to identify the device associated with the input. Key *string `locationName:"key" min:"1" type:"string"` // The time the detector model was last updated. @@ -2522,6 +2538,12 @@ func (s *DetectorModelConfiguration) SetDetectorModelVersion(v string) *Detector return s } +// SetEvaluationMethod sets the EvaluationMethod field's value. +func (s *DetectorModelConfiguration) SetEvaluationMethod(v string) *DetectorModelConfiguration { + s.EvaluationMethod = &v + return s +} + // SetKey sets the Key field's value. func (s *DetectorModelConfiguration) SetKey(v string) *DetectorModelConfiguration { s.Key = &v @@ -2673,6 +2695,11 @@ type DetectorModelVersionSummary struct { // The ID of the detector model version. DetectorModelVersion *string `locationName:"detectorModelVersion" min:"1" type:"string"` + // When set to SERIAL, variables are updated and event conditions evaluated + // in the order that the events are defined. When set to BATCH, variables are + // updated and events performed only after all event conditions are evaluated. + EvaluationMethod *string `locationName:"evaluationMethod" type:"string" enum:"EvaluationMethod"` + // The last time the detector model version was updated. LastUpdateTime *time.Time `locationName:"lastUpdateTime" type:"timestamp"` @@ -2718,6 +2745,12 @@ func (s *DetectorModelVersionSummary) SetDetectorModelVersion(v string) *Detecto return s } +// SetEvaluationMethod sets the EvaluationMethod field's value. +func (s *DetectorModelVersionSummary) SetEvaluationMethod(v string) *DetectorModelVersionSummary { + s.EvaluationMethod = &v + return s +} + // SetLastUpdateTime sets the LastUpdateTime field's value. 
func (s *DetectorModelVersionSummary) SetLastUpdateTime(v time.Time) *DetectorModelVersionSummary { s.LastUpdateTime = &v @@ -2807,18 +2840,18 @@ func (s *Event) SetEventName(v string) *Event { } // Sends information about the detector model instance and the event which triggered -// the action to a Kinesis Data Firehose stream. +// the action to a Kinesis Data Firehose delivery stream. type FirehoseAction struct { _ struct{} `type:"structure"` - // The name of the Kinesis Data Firehose stream where the data is written. + // The name of the Kinesis Data Firehose delivery stream where the data is written. // // DeliveryStreamName is a required field DeliveryStreamName *string `locationName:"deliveryStreamName" type:"string" required:"true"` // A character separator that is used to separate records written to the Kinesis - // Data Firehose stream. Valid values are: '\n' (newline), '\t' (tab), '\r\n' - // (Windows newline), ',' (comma). + // Data Firehose delivery stream. Valid values are: '\n' (newline), '\t' (tab), + // '\r\n' (Windows newline), ',' (comma). Separator *string `locationName:"separator" type:"string"` } @@ -3137,12 +3170,12 @@ func (s *IotTopicPublishAction) SetMqttTopic(v string) *IotTopicPublishAction { return s } -// Calls a Lambda function, passing in information about the detector model +// Calls an AWS Lambda function, passing in information about the detector model // instance and the event which triggered the action. type LambdaAction struct { _ struct{} `type:"structure"` - // The ARN of the Lambda function which is executed. + // The ARN of the AWS Lambda function which is executed. // // FunctionArn is a required field FunctionArn *string `locationName:"functionArn" min:"1" type:"string" required:"true"` @@ -4001,11 +4034,11 @@ func (s *SetVariableAction) SetVariableName(v string) *SetVariableAction { } // Sends information about the detector model instance and the event which triggered -// the action to an AWS SQS queue. +// the action to an Amazon SQS queue. type SqsAction struct { _ struct{} `type:"structure"` - // The URL of the SQS queue where the data is written. + // The URL of the Amazon SQS queue where the data is written. // // QueueUrl is a required field QueueUrl *string `locationName:"queueUrl" type:"string" required:"true"` @@ -4447,6 +4480,11 @@ type UpdateDetectorModelInput struct { // DetectorModelName is a required field DetectorModelName *string `location:"uri" locationName:"detectorModelName" min:"1" type:"string" required:"true"` + // When set to SERIAL, variables are updated and event conditions evaluated + // in the order that the events are defined. When set to BATCH, variables are + // updated and events performed only after all event conditions are evaluated. + EvaluationMethod *string `locationName:"evaluationMethod" type:"string" enum:"EvaluationMethod"` + // The ARN of the role that grants permission to AWS IoT Events to perform its // operations. // @@ -4512,6 +4550,12 @@ func (s *UpdateDetectorModelInput) SetDetectorModelName(v string) *UpdateDetecto return s } +// SetEvaluationMethod sets the EvaluationMethod field's value. +func (s *UpdateDetectorModelInput) SetEvaluationMethod(v string) *UpdateDetectorModelInput { + s.EvaluationMethod = &v + return s +} + // SetRoleArn sets the RoleArn field's value. 
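The new evaluationMethod field surfaces the SERIAL/BATCH evaluation behaviour on the detector model create, update and describe shapes above. A rough usage sketch, assuming an existing detector model, service role and definition (all names and ARNs below are placeholders; the EvaluationMethod* constants are the enum values added further down in this patch):

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iotevents"
)

func main() {
	svc := iotevents.New(session.Must(session.NewSession()))

	// Switch an existing detector model to serial evaluation: variables are
	// updated and event conditions evaluated in the order the events are defined.
	_, err := svc.UpdateDetectorModel(&iotevents.UpdateDetectorModelInput{
		DetectorModelName:       aws.String("example-detector-model"),
		DetectorModelDefinition: &iotevents.DetectorModelDefinition{ /* existing states */ },
		RoleArn:                 aws.String("arn:aws:iam::123456789012:role/example-iotevents-role"),
		EvaluationMethod:        aws.String(iotevents.EvaluationMethodSerial),
	})
	if err != nil {
		log.Fatal(err)
	}
}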
func (s *UpdateDetectorModelInput) SetRoleArn(v string) *UpdateDetectorModelInput { s.RoleArn = &v @@ -4656,6 +4700,14 @@ const ( DetectorModelVersionStatusFailed = "FAILED" ) +const ( + // EvaluationMethodBatch is a EvaluationMethod enum value + EvaluationMethodBatch = "BATCH" + + // EvaluationMethodSerial is a EvaluationMethod enum value + EvaluationMethodSerial = "SERIAL" +) + const ( // InputStatusCreating is a InputStatus enum value InputStatusCreating = "CREATING" diff --git a/vendor/github.com/aws/aws-sdk-go/service/iotevents/service.go b/vendor/github.com/aws/aws-sdk-go/service/iotevents/service.go index af03d02a01a..13eeb1dfb3c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/iotevents/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/iotevents/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *IoTEvents { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "iotevents" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *IoTEvents { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *IoTEvents { svc := &IoTEvents{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-07-27", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/kafka/api.go b/vendor/github.com/aws/aws-sdk-go/service/kafka/api.go index ce7262a6ac3..f95dd4a89da 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/kafka/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/kafka/api.go @@ -1097,7 +1097,7 @@ func (c *Kafka) ListConfigurationRevisionsRequest(input *ListConfigurationRevisi // ListConfigurationRevisions API operation for Managed Streaming for Kafka. // -// Returns a list of all the MSK configurations in this Region. +// Returns a list of all the revisions of an MSK configuration. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1745,6 +1745,99 @@ func (c *Kafka) UntagResourceWithContext(ctx aws.Context, input *UntagResourceIn return out, req.Send() } +const opUpdateBrokerCount = "UpdateBrokerCount" + +// UpdateBrokerCountRequest generates a "aws/request.Request" representing the +// client's request for the UpdateBrokerCount operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateBrokerCount for more information on using the UpdateBrokerCount +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateBrokerCountRequest method. 
+// req, resp := client.UpdateBrokerCountRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/UpdateBrokerCount +func (c *Kafka) UpdateBrokerCountRequest(input *UpdateBrokerCountInput) (req *request.Request, output *UpdateBrokerCountOutput) { + op := &request.Operation{ + Name: opUpdateBrokerCount, + HTTPMethod: "PUT", + HTTPPath: "/v1/clusters/{clusterArn}/nodes/count", + } + + if input == nil { + input = &UpdateBrokerCountInput{} + } + + output = &UpdateBrokerCountOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateBrokerCount API operation for Managed Streaming for Kafka. +// +// Updates the number of broker nodes in the cluster. You can use this operation +// to increase the number of brokers in an existing cluster. You can't decrease +// the number of brokers. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Managed Streaming for Kafka's +// API operation UpdateBrokerCount for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// Returns information about an error. +// +// * ErrCodeBadRequestException "BadRequestException" +// Returns information about an error. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// Returns information about an error. +// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// Returns information about an error. +// +// * ErrCodeForbiddenException "ForbiddenException" +// Returns information about an error. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/UpdateBrokerCount +func (c *Kafka) UpdateBrokerCount(input *UpdateBrokerCountInput) (*UpdateBrokerCountOutput, error) { + req, out := c.UpdateBrokerCountRequest(input) + return out, req.Send() +} + +// UpdateBrokerCountWithContext is the same as UpdateBrokerCount with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateBrokerCount for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Kafka) UpdateBrokerCountWithContext(ctx aws.Context, input *UpdateBrokerCountInput, opts ...request.Option) (*UpdateBrokerCountOutput, error) { + req, out := c.UpdateBrokerCountRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opUpdateBrokerStorage = "UpdateBrokerStorage" // UpdateBrokerStorageRequest generates a "aws/request.Request" representing the @@ -1990,13 +2083,7 @@ func (s *BrokerEBSVolumeInfo) SetVolumeSizeGB(v int64) *BrokerEBSVolumeInfo { type BrokerNodeGroupInfo struct { _ struct{} `type:"structure"` - // The distribution of broker nodes across Availability Zones. This is an optional - // parameter. If you don't specify it, Amazon MSK gives it the value DEFAULT. - // You can also explicitly set this parameter to the value DEFAULT. No other - // values are currently allowed. 
- // - // Amazon MSK distributes the broker nodes evenly across the Availability Zones - // that correspond to the subnets you provide when you create the cluster. + // The distribution of broker nodes across Availability Zones. BrokerAZDistribution *string `locationName:"brokerAZDistribution" type:"string" enum:"BrokerAZDistribution"` // The list of subnets to connect to in the client virtual private cloud (VPC). @@ -2008,8 +2095,8 @@ type BrokerNodeGroupInfo struct { ClientSubnets []*string `locationName:"clientSubnets" type:"list" required:"true"` // The type of Amazon EC2 instances to use for Kafka brokers. The following - // instance types are allowed: kafka.m5.large, kafka.m5.xlarge, kafka.m5.2xlarge,kafka.m5.4xlarge, - // kafka.m5.12xlarge, and kafka.m5.24xlarge. + // instance types are allowed: kafka.m5.large, kafka.m5.xlarge, kafka.m5.2xlarge, + // kafka.m5.4xlarge, kafka.m5.12xlarge, and kafka.m5.24xlarge. // // InstanceType is a required field InstanceType *string `locationName:"instanceType" min:"5" type:"string" required:"true"` @@ -2017,7 +2104,9 @@ type BrokerNodeGroupInfo struct { // The AWS security groups to associate with the elastic network interfaces // in order to specify who can connect to and communicate with the Amazon MSK // cluster. If you don't specify a security group, Amazon MSK uses the default - // security group associated with the VPC. + // security group associated with the VPC. If you specify security groups that + // were shared with you, you must ensure that you have permissions to them. + // Specifically, you need the ec2:DescribeSecurityGroups permission. SecurityGroups []*string `locationName:"securityGroups" type:"list"` // Contains information about storage volumes attached to MSK broker nodes. @@ -2252,7 +2341,9 @@ type ClusterInfo struct { // brokers in the cluster. CurrentBrokerSoftwareInfo *BrokerSoftwareInfo `locationName:"currentBrokerSoftwareInfo" type:"structure"` - // The current version of the MSK cluster. + // The current version of the MSK cluster. Cluster versions aren't simple integers. + // You can obtain the current version by describing the cluster. An example + // version is KTVPDKIKX0DER. CurrentVersion *string `locationName:"currentVersion" type:"string"` // Includes all encryption-related information. @@ -2505,7 +2596,8 @@ type Configuration struct { // LatestRevision is a required field LatestRevision *ConfigurationRevision `locationName:"latestRevision" type:"structure" required:"true"` - // The name of the configuration. + // The name of the configuration. Configuration names are strings that match + // the regex "^[0-9A-Za-z-]+$". // // Name is a required field Name *string `locationName:"name" type:"string" required:"true"` @@ -2673,8 +2765,7 @@ type CreateClusterInput struct { // ClusterName is a required field ClusterName *string `locationName:"clusterName" min:"1" type:"string" required:"true"` - // Represents the configuration that you want MSK to use for the brokers in - // a cluster. + // Represents the configuration that you want MSK to use for the cluster. ConfigurationInfo *ConfigurationInfo `locationName:"configurationInfo" type:"structure"` // Includes all encryption-related information. @@ -2862,7 +2953,8 @@ type CreateConfigurationInput struct { // KafkaVersions is a required field KafkaVersions []*string `locationName:"kafkaVersions" type:"list" required:"true"` - // The name of the configuration. + // The name of the configuration. Configuration names are strings that match + // the regex "^[0-9A-Za-z-]+$". 
// // Name is a required field Name *string `locationName:"name" type:"string" required:"true"` @@ -2939,7 +3031,8 @@ type CreateConfigurationOutput struct { // Latest revision of the configuration. LatestRevision *ConfigurationRevision `locationName:"latestRevision" type:"structure"` - // The name of the configuration. + // The name of the configuration. Configuration names are strings that match + // the regex "^[0-9A-Za-z-]+$". Name *string `locationName:"name" type:"string"` } @@ -3241,7 +3334,8 @@ type DescribeConfigurationOutput struct { // Latest revision of the configuration. LatestRevision *ConfigurationRevision `locationName:"latestRevision" type:"structure"` - // The name of the configuration. + // The name of the configuration. Configuration names are strings that match + // the regex "^[0-9A-Za-z-]+$". Name *string `locationName:"name" type:"string"` } @@ -3485,7 +3579,7 @@ type EncryptionInTransit struct { _ struct{} `type:"structure"` // Indicates the encryption setting for data in transit between clients and - // brokers. The following are the possible values. + // brokers. You must set it to one of the following values. // // TLS means that client-broker communication is enabled with TLS only. // @@ -3495,7 +3589,7 @@ type EncryptionInTransit struct { // PLAINTEXT means that client-broker communication is enabled in plaintext // only. // - // The default value is TLS_PLAINTEXT. + // The default value is TLS. ClientBroker *string `locationName:"clientBroker" type:"string" enum:"ClientBroker"` // When set to true, it indicates that data communication among the broker nodes @@ -3657,7 +3751,9 @@ type GetBootstrapBrokersOutput struct { // A string containing one or more hostname:port pairs. BootstrapBrokerString *string `locationName:"bootstrapBrokerString" type:"string"` - // A string containing one or more DNS names (or IP) and TLS port pairs. + // A string containing one or more DNS names (or IP) and TLS port pairs. The + // following is an example. + // { "BootstrapBrokerStringTls": "b-3.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9094,b-1.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9094,b-2.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9094"} BootstrapBrokerStringTls *string `locationName:"bootstrapBrokerStringTls" type:"string"` } @@ -4497,6 +4593,111 @@ func (s UntagResourceOutput) GoString() string { return s.String() } +// Request body for UpdateBrokerCount. +type UpdateBrokerCountInput struct { + _ struct{} `type:"structure"` + + // ClusterArn is a required field + ClusterArn *string `location:"uri" locationName:"clusterArn" type:"string" required:"true"` + + // The current version of the cluster. + // + // CurrentVersion is a required field + CurrentVersion *string `locationName:"currentVersion" type:"string" required:"true"` + + // The number of broker nodes that you want the cluster to have after this operation + // completes successfully. + // + // TargetNumberOfBrokerNodes is a required field + TargetNumberOfBrokerNodes *int64 `locationName:"targetNumberOfBrokerNodes" min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s UpdateBrokerCountInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateBrokerCountInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateBrokerCountInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateBrokerCountInput"} + if s.ClusterArn == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterArn")) + } + if s.ClusterArn != nil && len(*s.ClusterArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClusterArn", 1)) + } + if s.CurrentVersion == nil { + invalidParams.Add(request.NewErrParamRequired("CurrentVersion")) + } + if s.TargetNumberOfBrokerNodes == nil { + invalidParams.Add(request.NewErrParamRequired("TargetNumberOfBrokerNodes")) + } + if s.TargetNumberOfBrokerNodes != nil && *s.TargetNumberOfBrokerNodes < 1 { + invalidParams.Add(request.NewErrParamMinValue("TargetNumberOfBrokerNodes", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClusterArn sets the ClusterArn field's value. +func (s *UpdateBrokerCountInput) SetClusterArn(v string) *UpdateBrokerCountInput { + s.ClusterArn = &v + return s +} + +// SetCurrentVersion sets the CurrentVersion field's value. +func (s *UpdateBrokerCountInput) SetCurrentVersion(v string) *UpdateBrokerCountInput { + s.CurrentVersion = &v + return s +} + +// SetTargetNumberOfBrokerNodes sets the TargetNumberOfBrokerNodes field's value. +func (s *UpdateBrokerCountInput) SetTargetNumberOfBrokerNodes(v int64) *UpdateBrokerCountInput { + s.TargetNumberOfBrokerNodes = &v + return s +} + +// Response body for UpdateBrokerCount. +type UpdateBrokerCountOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the cluster. + ClusterArn *string `locationName:"clusterArn" type:"string"` + + // The Amazon Resource Name (ARN) of the cluster operation. + ClusterOperationArn *string `locationName:"clusterOperationArn" type:"string"` +} + +// String returns the string representation +func (s UpdateBrokerCountOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateBrokerCountOutput) GoString() string { + return s.String() +} + +// SetClusterArn sets the ClusterArn field's value. +func (s *UpdateBrokerCountOutput) SetClusterArn(v string) *UpdateBrokerCountOutput { + s.ClusterArn = &v + return s +} + +// SetClusterOperationArn sets the ClusterOperationArn field's value. +func (s *UpdateBrokerCountOutput) SetClusterOperationArn(v string) *UpdateBrokerCountOutput { + s.ClusterOperationArn = &v + return s +} + // Request object for UpdateBrokerStorage. type UpdateBrokerStorageInput struct { _ struct{} `type:"structure"` @@ -4513,6 +4714,11 @@ type UpdateBrokerStorageInput struct { // Describes the target volume size and the ID of the broker to apply the update // to. // + // The value you specify for Target-Volume-in-GiB must be a whole number that + // is greater than 100 GiB. + // + // The storage per broker after the update operation can't exceed 16384 GiB. + // // TargetBrokerEBSVolumeInfo is a required field TargetBrokerEBSVolumeInfo []*BrokerEBSVolumeInfo `locationName:"targetBrokerEBSVolumeInfo" type:"list" required:"true"` } @@ -4617,13 +4823,12 @@ type UpdateClusterConfigurationInput struct { // ClusterArn is a required field ClusterArn *string `location:"uri" locationName:"clusterArn" type:"string" required:"true"` - // Represents the configuration that you want MSK to use for the brokers in - // a cluster. + // Represents the configuration that you want MSK to use for the cluster. 
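With the UpdateBrokerCount request and response shapes above, expanding a cluster is a single call. A hedged sketch of how a caller might drive the new operation, assuming the cluster ARN and its current version token are taken from DescribeCluster (all identifiers below are placeholders):

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kafka"
)

func main() {
	svc := kafka.New(session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1"))))

	clusterArn := "arn:aws:kafka:us-east-1:123456789012:cluster/example/abcd1234" // placeholder

	// The current version token is required for optimistic locking; fetch it
	// from DescribeCluster rather than hard-coding it.
	desc, err := svc.DescribeCluster(&kafka.DescribeClusterInput{ClusterArn: aws.String(clusterArn)})
	if err != nil {
		log.Fatal(err)
	}

	// Per the operation docs above, brokers can only be scaled up, never down.
	out, err := svc.UpdateBrokerCount(&kafka.UpdateBrokerCountInput{
		ClusterArn:                aws.String(clusterArn),
		CurrentVersion:            desc.ClusterInfo.CurrentVersion,
		TargetNumberOfBrokerNodes: aws.Int64(6),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("cluster operation:", aws.StringValue(out.ClusterOperationArn))
}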
// // ConfigurationInfo is a required field ConfigurationInfo *ConfigurationInfo `locationName:"configurationInfo" type:"structure" required:"true"` - // The version of the cluster that needs to be updated. + // The version of the cluster that you want to update. // // CurrentVersion is a required field CurrentVersion *string `locationName:"currentVersion" type:"string" required:"true"` @@ -4777,13 +4982,10 @@ func (s *ZookeeperNodeInfo) SetZookeeperVersion(v string) *ZookeeperNodeInfo { return s } -// The distribution of broker nodes across Availability Zones. This is an optional -// parameter. If you don't specify it, Amazon MSK gives it the value DEFAULT. -// You can also explicitly set this parameter to the value DEFAULT. No other -// values are currently allowed. -// -// Amazon MSK distributes the broker nodes evenly across the Availability Zones -// that correspond to the subnets you provide when you create the cluster. +// The distribution of broker nodes across Availability Zones. By default, broker +// nodes are distributed among the Availability Zones of your Region. Currently, +// the only supported value is DEFAULT. You can either specify this value explicitly +// or leave it out. const ( // BrokerAZDistributionDefault is a BrokerAZDistribution enum value BrokerAZDistributionDefault = "DEFAULT" diff --git a/vendor/github.com/aws/aws-sdk-go/service/kafka/service.go b/vendor/github.com/aws/aws-sdk-go/service/kafka/service.go index 577c3777e17..9c07694b13a 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/kafka/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/kafka/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *Kafka { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "kafka" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Kafka { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Kafka { svc := &Kafka{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-11-14", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/kinesis/api.go b/vendor/github.com/aws/aws-sdk-go/service/kinesis/api.go index 888f7a4363f..b282cf645b7 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/kinesis/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/kinesis/api.go @@ -7379,6 +7379,8 @@ type SubscribeToShardEventStream struct { // may result in resource leaks. 
func (es *SubscribeToShardEventStream) Close() (err error) { es.Reader.Close() + es.StreamCloser.Close() + return es.Err() } @@ -7388,8 +7390,6 @@ func (es *SubscribeToShardEventStream) Err() error { if err := es.Reader.Err(); err != nil { return err } - es.StreamCloser.Close() - return nil } diff --git a/vendor/github.com/aws/aws-sdk-go/service/kinesis/service.go b/vendor/github.com/aws/aws-sdk-go/service/kinesis/service.go index 7c3e8c48a28..6c561f1e8eb 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/kinesis/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/kinesis/service.go @@ -46,11 +46,11 @@ const ( // svc := kinesis.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Kinesis { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Kinesis { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Kinesis { svc := &Kinesis{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2013-12-02", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/kinesisanalytics/service.go b/vendor/github.com/aws/aws-sdk-go/service/kinesisanalytics/service.go index 153daad6eba..d6375e1a205 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/kinesisanalytics/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/kinesisanalytics/service.go @@ -46,11 +46,11 @@ const ( // svc := kinesisanalytics.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *KinesisAnalytics { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
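The Kinesis change above is a resource-leak fix rather than a new feature: the underlying HTTP stream closer now runs in Close() instead of only when Err() happened to be called. A sketch of the consumer pattern this protects, assuming an existing stream, shard and registered consumer ARN (all placeholders):

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kinesis"
)

func main() {
	svc := kinesis.New(session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1"))))

	out, err := svc.SubscribeToShard(&kinesis.SubscribeToShardInput{
		ConsumerARN: aws.String("arn:aws:kinesis:us-east-1:123456789012:stream/example/consumer/app:1"), // placeholder
		ShardId:     aws.String("shardId-000000000000"),
		StartingPosition: &kinesis.StartingPosition{
			Type: aws.String(kinesis.ShardIteratorTypeLatest),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	// Close() now also closes the underlying response body, so a deferred call
	// releases the connection even when the stream ends without an error.
	defer out.EventStream.Close()

	for event := range out.EventStream.Events() {
		if ev, ok := event.(*kinesis.SubscribeToShardEvent); ok {
			log.Printf("received %d records", len(ev.Records))
		}
	}
	if err := out.EventStream.Err(); err != nil {
		log.Fatal(err)
	}
}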
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *KinesisAnalytics { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *KinesisAnalytics { svc := &KinesisAnalytics{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-08-14", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/kinesisanalyticsv2/service.go b/vendor/github.com/aws/aws-sdk-go/service/kinesisanalyticsv2/service.go index d066c2e6c70..6399b45ae45 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/kinesisanalyticsv2/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/kinesisanalyticsv2/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *KinesisAnalyticsV2 { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "kinesisanalytics" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *KinesisAnalyticsV2 { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *KinesisAnalyticsV2 { svc := &KinesisAnalyticsV2{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-05-23", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/kinesisvideo/service.go b/vendor/github.com/aws/aws-sdk-go/service/kinesisvideo/service.go index a723794cd00..dc21b52f359 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/kinesisvideo/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/kinesisvideo/service.go @@ -46,11 +46,11 @@ const ( // svc := kinesisvideo.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *KinesisVideo { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *KinesisVideo { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *KinesisVideo { svc := &KinesisVideo{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-09-30", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/kms/service.go b/vendor/github.com/aws/aws-sdk-go/service/kms/service.go index 6d062f32fc8..30a7b6875d3 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/kms/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/kms/service.go @@ -46,11 +46,11 @@ const ( // svc := kms.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *KMS { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *KMS { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *KMS { svc := &KMS{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2014-11-01", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/lakeformation/service.go b/vendor/github.com/aws/aws-sdk-go/service/lakeformation/service.go index 495ab0b69bf..a13b061c64d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/lakeformation/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/lakeformation/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *LakeFormation { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "lakeformation" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *LakeFormation { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *LakeFormation { svc := &LakeFormation{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-03-31", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/lambda/service.go b/vendor/github.com/aws/aws-sdk-go/service/lambda/service.go index 1cccdda0842..f137393fc0c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/lambda/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/lambda/service.go @@ -46,11 +46,11 @@ const ( // svc := lambda.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Lambda { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Lambda { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Lambda { svc := &Lambda{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-03-31", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/lexmodelbuildingservice/service.go b/vendor/github.com/aws/aws-sdk-go/service/lexmodelbuildingservice/service.go index 0f9509ff725..98937996781 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/lexmodelbuildingservice/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/lexmodelbuildingservice/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *LexModelBuildingService if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "lex" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *LexModelBuildingService { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *LexModelBuildingService { svc := &LexModelBuildingService{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-04-19", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/licensemanager/service.go b/vendor/github.com/aws/aws-sdk-go/service/licensemanager/service.go index 5a8fc996539..fca9b9c9f47 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/licensemanager/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/licensemanager/service.go @@ -46,11 +46,11 @@ const ( // svc := licensemanager.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *LicenseManager { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *LicenseManager { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *LicenseManager { svc := &LicenseManager{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-08-01", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/lightsail/service.go b/vendor/github.com/aws/aws-sdk-go/service/lightsail/service.go index b9f97faa8a6..092633b75a2 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/lightsail/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/lightsail/service.go @@ -46,11 +46,11 @@ const ( // svc := lightsail.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Lightsail { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Lightsail { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Lightsail { svc := &Lightsail{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2016-11-28", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/macie/service.go b/vendor/github.com/aws/aws-sdk-go/service/macie/service.go index 0b38598f0fe..9e5f20a2699 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/macie/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/macie/service.go @@ -46,11 +46,11 @@ const ( // svc := macie.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Macie { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Macie { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Macie { svc := &Macie{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-12-19", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/managedblockchain/service.go b/vendor/github.com/aws/aws-sdk-go/service/managedblockchain/service.go index d3049e6c546..acd42867a67 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/managedblockchain/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/managedblockchain/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *ManagedBlockchain { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "managedblockchain" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ManagedBlockchain { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ManagedBlockchain { svc := &ManagedBlockchain{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-09-24", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/mediaconnect/service.go b/vendor/github.com/aws/aws-sdk-go/service/mediaconnect/service.go index 218d8e1905a..72aacea0b1c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/mediaconnect/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/mediaconnect/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *MediaConnect { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "mediaconnect" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *MediaConnect { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *MediaConnect { svc := &MediaConnect{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-11-14", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/mediaconvert/service.go b/vendor/github.com/aws/aws-sdk-go/service/mediaconvert/service.go index 43d8865f45f..beada6a05f0 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/mediaconvert/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/mediaconvert/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *MediaConvert { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "mediaconvert" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *MediaConvert { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *MediaConvert { svc := &MediaConvert{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-08-29", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/medialive/service.go b/vendor/github.com/aws/aws-sdk-go/service/medialive/service.go index 621855b02ad..885e54d4a7a 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/medialive/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/medialive/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *MediaLive { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "medialive" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *MediaLive { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *MediaLive { svc := &MediaLive{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-10-14", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/mediapackage/service.go b/vendor/github.com/aws/aws-sdk-go/service/mediapackage/service.go index e08a3e8787c..70659b45953 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/mediapackage/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/mediapackage/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *MediaPackage { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "mediapackage" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *MediaPackage { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *MediaPackage { svc := &MediaPackage{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-10-12", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/mediastore/service.go b/vendor/github.com/aws/aws-sdk-go/service/mediastore/service.go index 492b3bf04c3..0dbef72bfa9 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/mediastore/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/mediastore/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *MediaStore { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "mediastore" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *MediaStore { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *MediaStore { svc := &MediaStore{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-09-01", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/mediastoredata/service.go b/vendor/github.com/aws/aws-sdk-go/service/mediastoredata/service.go index 4203140146d..0fcbc2141b6 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/mediastoredata/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/mediastoredata/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *MediaStoreData { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "mediastore" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *MediaStoreData { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *MediaStoreData { svc := &MediaStoreData{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-09-01", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/mq/service.go b/vendor/github.com/aws/aws-sdk-go/service/mq/service.go index 4baaa385e39..d218f98529e 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/mq/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/mq/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *MQ { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "mq" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *MQ { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *MQ { svc := &MQ{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-11-27", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/neptune/service.go b/vendor/github.com/aws/aws-sdk-go/service/neptune/service.go index 3ddc5e5fba7..d5bb045c0c5 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/neptune/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/neptune/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *Neptune { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "rds" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Neptune { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Neptune { svc := &Neptune{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2014-10-31", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/opsworks/service.go b/vendor/github.com/aws/aws-sdk-go/service/opsworks/service.go index a1a8307cae4..6abe01ab353 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/opsworks/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/opsworks/service.go @@ -46,11 +46,11 @@ const ( // svc := opsworks.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *OpsWorks { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *OpsWorks { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *OpsWorks { svc := &OpsWorks{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2013-02-18", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/organizations/service.go b/vendor/github.com/aws/aws-sdk-go/service/organizations/service.go index 565c1715f3c..37d5a2dcd4a 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/organizations/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/organizations/service.go @@ -46,11 +46,11 @@ const ( // svc := organizations.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Organizations { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Organizations { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Organizations { svc := &Organizations{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2016-11-28", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/personalize/api.go b/vendor/github.com/aws/aws-sdk-go/service/personalize/api.go index bea02eca744..1c330645b39 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/personalize/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/personalize/api.go @@ -5160,6 +5160,18 @@ type CreateSolutionVersionInput struct { // // SolutionArn is a required field SolutionArn *string `locationName:"solutionArn" type:"string" required:"true"` + + // The scope of training to be performed when creating the solution version. + // The FULL option trains the solution version based on the entirety of the + // input solution's training data, while the UPDATE option processes only the + // data that has changed in comparison to the input solution. Choose UPDATE + // when you want to incrementally update your solution version instead of creating + // an entirely new one. + // + // The UPDATE option can only be used when you already have an active solution + // version created from the input solution using the FULL option and the input + // solution was trained with the native-recipe-hrnn-coldstart recipe. + TrainingMode *string `locationName:"trainingMode" type:"string" enum:"TrainingMode"` } // String returns the string representation @@ -5191,6 +5203,12 @@ func (s *CreateSolutionVersionInput) SetSolutionArn(v string) *CreateSolutionVer return s } +// SetTrainingMode sets the TrainingMode field's value. +func (s *CreateSolutionVersionInput) SetTrainingMode(v string) *CreateSolutionVersionInput { + s.TrainingMode = &v + return s +} + type CreateSolutionVersionOutput struct { _ struct{} `type:"structure"` @@ -7492,10 +7510,12 @@ func (s *HPOObjective) SetType(v string) *HPOObjective { type HPOResourceConfig struct { _ struct{} `type:"structure"` - // The maximum number of training jobs. + // The maximum number of training jobs when you create a solution version. The + // maximum value for maxNumberOfTrainingJobs is 40. MaxNumberOfTrainingJobs *string `locationName:"maxNumberOfTrainingJobs" type:"string"` - // The maximum number of parallel training jobs. + // The maximum number of parallel training jobs when you create a solution version. + // The maximum value for maxParallelTrainingJobs is 10. MaxParallelTrainingJobs *string `locationName:"maxParallelTrainingJobs" type:"string"` } @@ -8891,15 +8911,15 @@ type SolutionVersion struct { // the model. EventType *string `locationName:"eventType" type:"string"` - // If training a solution version fails, the reason behind the failure. + // If training a solution version fails, the reason for the failure. FailureReason *string `locationName:"failureReason" type:"string"` // The date and time (in Unix time) that the solution was last updated. 
LastUpdatedDateTime *time.Time `locationName:"lastUpdatedDateTime" type:"timestamp"` - // When true, Amazon Personalize performs a search for the most optimal recipe - // according to the solution configuration. When false (the default), Amazon - // Personalize uses recipeArn. + // When true, Amazon Personalize searches for the most optimal recipe according + // to the solution configuration. When false (the default), Amazon Personalize + // uses recipeArn. PerformAutoML *bool `locationName:"performAutoML" type:"boolean"` // Whether to perform hyperparameter optimization (HPO) on the chosen recipe. @@ -8922,11 +8942,30 @@ type SolutionVersion struct { // // A solution version can be in one of the following states: // - // * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED + // * CREATE PENDING + // + // * CREATE IN_PROGRESS + // + // * ACTIVE + // + // * CREATE FAILED Status *string `locationName:"status" type:"string"` - // The time used to train the model. + // The time used to train the model. You are billed for the time it takes to + // train a model. This field is visible only after Amazon Personalize successfully + // trains a model. TrainingHours *float64 `locationName:"trainingHours" type:"double"` + + // The scope of training used to create the solution version. The FULL option + // trains the solution version based on the entirety of the input solution's + // training data, while the UPDATE option processes only the training data that + // has changed since the creation of the last solution version. Choose UPDATE + // when you want to start recommending items added to the dataset without retraining + // the model. + // + // The UPDATE option can only be used after you've created a solution version + // with the FULL option and the training solution uses the native-recipe-hrnn-coldstart. + TrainingMode *string `locationName:"trainingMode" type:"string" enum:"TrainingMode"` } // String returns the string representation @@ -9017,6 +9056,12 @@ func (s *SolutionVersion) SetTrainingHours(v float64) *SolutionVersion { return s } +// SetTrainingMode sets the TrainingMode field's value. +func (s *SolutionVersion) SetTrainingMode(v string) *SolutionVersion { + s.TrainingMode = &v + return s +} + // Provides a summary of the properties of a solution version. For a complete // listing, call the DescribeSolutionVersion API. type SolutionVersionSummary struct { @@ -9169,3 +9214,11 @@ const ( // RecipeProviderService is a RecipeProvider enum value RecipeProviderService = "SERVICE" ) + +const ( + // TrainingModeFull is a TrainingMode enum value + TrainingModeFull = "FULL" + + // TrainingModeUpdate is a TrainingMode enum value + TrainingModeUpdate = "UPDATE" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/personalize/service.go b/vendor/github.com/aws/aws-sdk-go/service/personalize/service.go index 4b1ff03e3de..3ec8ede0c9c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/personalize/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/personalize/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *Personalize { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "personalize" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
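The TrainingMode field and its FULL/UPDATE constants introduced above are set on CreateSolutionVersionInput. A minimal sketch of requesting an incremental retrain with TrainingModeUpdate; the session setup and solution ARN are placeholder assumptions, not taken from this patch:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/personalize"
    )

    func main() {
        sess := session.Must(session.NewSession())
        svc := personalize.New(sess, aws.NewConfig().WithRegion("us-west-2"))

        // UPDATE processes only data added since the last FULL training run and
        // requires an existing ACTIVE solution version trained with FULL.
        input := &personalize.CreateSolutionVersionInput{
            SolutionArn:  aws.String("arn:aws:personalize:us-west-2:123456789012:solution/example"), // placeholder ARN
            TrainingMode: aws.String(personalize.TrainingModeUpdate),
        }

        out, err := svc.CreateSolutionVersion(input)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(out)
    }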
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Personalize { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Personalize { svc := &Personalize{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-05-22", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/pinpoint/service.go b/vendor/github.com/aws/aws-sdk-go/service/pinpoint/service.go index 487964c89cc..2a27113746c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/pinpoint/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/pinpoint/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *Pinpoint { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "mobiletargeting" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Pinpoint { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Pinpoint { svc := &Pinpoint{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2016-12-01", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/pricing/service.go b/vendor/github.com/aws/aws-sdk-go/service/pricing/service.go index 90ff33d0a08..90f54acd02f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/pricing/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/pricing/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *Pricing { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "pricing" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Pricing { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Pricing { svc := &Pricing{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-10-15", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/qldb/service.go b/vendor/github.com/aws/aws-sdk-go/service/qldb/service.go index c24c67a5b23..ea80bc02e1f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/qldb/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/qldb/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *QLDB { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "qldb" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *QLDB { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *QLDB { svc := &QLDB{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2019-01-02", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/quicksight/service.go b/vendor/github.com/aws/aws-sdk-go/service/quicksight/service.go index 3ba978ad6a5..28623f680ce 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/quicksight/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/quicksight/service.go @@ -46,11 +46,11 @@ const ( // svc := quicksight.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *QuickSight { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *QuickSight { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *QuickSight { svc := &QuickSight{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-04-01", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/ram/service.go b/vendor/github.com/aws/aws-sdk-go/service/ram/service.go index e1e1dc6683e..cf09e5ce963 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ram/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ram/service.go @@ -46,11 +46,11 @@ const ( // svc := ram.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *RAM { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *RAM { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *RAM { svc := &RAM{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-01-04", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/rds/api.go b/vendor/github.com/aws/aws-sdk-go/service/rds/api.go index bbba1da65fa..7cee4532fc2 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/rds/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/rds/api.go @@ -534,8 +534,8 @@ func (c *RDS) AuthorizeDBSecurityGroupIngressRequest(input *AuthorizeDBSecurityG // The state of the DB security group doesn't allow deletion. // // * ErrCodeAuthorizationAlreadyExistsFault "AuthorizationAlreadyExists" -// The specified CIDRIP or Amazon EC2 security group is already authorized for -// the specified DB security group. +// The specified CIDR IP range or Amazon EC2 security group is already authorized +// for the specified DB security group. // // * ErrCodeAuthorizationQuotaExceededFault "AuthorizationQuotaExceeded" // The DB security group authorization quota has been reached. @@ -1157,6 +1157,98 @@ func (c *RDS) CopyOptionGroupWithContext(ctx aws.Context, input *CopyOptionGroup return out, req.Send() } +const opCreateCustomAvailabilityZone = "CreateCustomAvailabilityZone" + +// CreateCustomAvailabilityZoneRequest generates a "aws/request.Request" representing the +// client's request for the CreateCustomAvailabilityZone operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateCustomAvailabilityZone for more information on using the CreateCustomAvailabilityZone +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateCustomAvailabilityZoneRequest method. +// req, resp := client.CreateCustomAvailabilityZoneRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/CreateCustomAvailabilityZone +func (c *RDS) CreateCustomAvailabilityZoneRequest(input *CreateCustomAvailabilityZoneInput) (req *request.Request, output *CreateCustomAvailabilityZoneOutput) { + op := &request.Operation{ + Name: opCreateCustomAvailabilityZone, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateCustomAvailabilityZoneInput{} + } + + output = &CreateCustomAvailabilityZoneOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateCustomAvailabilityZone API operation for Amazon Relational Database Service. +// +// Creates a custom Availability Zone (AZ). +// +// A custom AZ is an on-premises AZ that is integrated with a VMware vSphere +// cluster. +// +// For more information about RDS on VMware, see the RDS on VMware User Guide. +// (https://docs.aws.amazon.com/AmazonRDS/latest/RDSonVMwareUserGuide/rds-on-vmware.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Relational Database Service's +// API operation CreateCustomAvailabilityZone for usage and error information. +// +// Returned Error Codes: +// * ErrCodeCustomAvailabilityZoneAlreadyExistsFault "CustomAvailabilityZoneAlreadyExists" +// CustomAvailabilityZoneName is already used by an existing custom Availability +// Zone. +// +// * ErrCodeCustomAvailabilityZoneQuotaExceededFault "CustomAvailabilityZoneQuotaExceeded" +// You have exceeded the maximum number of custom Availability Zones. +// +// * ErrCodeKMSKeyNotAccessibleFault "KMSKeyNotAccessibleFault" +// An error occurred accessing an AWS KMS key. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/CreateCustomAvailabilityZone +func (c *RDS) CreateCustomAvailabilityZone(input *CreateCustomAvailabilityZoneInput) (*CreateCustomAvailabilityZoneOutput, error) { + req, out := c.CreateCustomAvailabilityZoneRequest(input) + return out, req.Send() +} + +// CreateCustomAvailabilityZoneWithContext is the same as CreateCustomAvailabilityZone with the addition of +// the ability to pass a context and additional request options. +// +// See CreateCustomAvailabilityZone for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *RDS) CreateCustomAvailabilityZoneWithContext(ctx aws.Context, input *CreateCustomAvailabilityZoneInput, opts ...request.Option) (*CreateCustomAvailabilityZoneOutput, error) { + req, out := c.CreateCustomAvailabilityZoneRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opCreateDBCluster = "CreateDBCluster" // CreateDBClusterRequest generates a "aws/request.Request" representing the @@ -1712,11 +1804,11 @@ func (c *RDS) CreateDBInstanceRequest(input *CreateDBInstanceInput) (req *reques // Storage of the StorageType specified can't be associated with the DB instance. // // * ErrCodeAuthorizationNotFoundFault "AuthorizationNotFound" -// The specified CIDRIP or Amazon EC2 security group isn't authorized for the -// specified DB security group. +// The specified CIDR IP range or Amazon EC2 security group might not be authorized +// for the specified DB security group. // -// RDS also may not be authorized by using IAM to perform necessary actions -// on your behalf. +// Or, RDS might not be authorized to perform necessary actions using IAM on +// your behalf. // // * ErrCodeKMSKeyNotAccessibleFault "KMSKeyNotAccessibleFault" // An error occurred accessing an AWS KMS key. @@ -2572,6 +2664,95 @@ func (c *RDS) CreateOptionGroupWithContext(ctx aws.Context, input *CreateOptionG return out, req.Send() } +const opDeleteCustomAvailabilityZone = "DeleteCustomAvailabilityZone" + +// DeleteCustomAvailabilityZoneRequest generates a "aws/request.Request" representing the +// client's request for the DeleteCustomAvailabilityZone operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteCustomAvailabilityZone for more information on using the DeleteCustomAvailabilityZone +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteCustomAvailabilityZoneRequest method. +// req, resp := client.DeleteCustomAvailabilityZoneRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/DeleteCustomAvailabilityZone +func (c *RDS) DeleteCustomAvailabilityZoneRequest(input *DeleteCustomAvailabilityZoneInput) (req *request.Request, output *DeleteCustomAvailabilityZoneOutput) { + op := &request.Operation{ + Name: opDeleteCustomAvailabilityZone, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteCustomAvailabilityZoneInput{} + } + + output = &DeleteCustomAvailabilityZoneOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteCustomAvailabilityZone API operation for Amazon Relational Database Service. +// +// Deletes a custom Availability Zone (AZ). +// +// A custom AZ is an on-premises AZ that is integrated with a VMware vSphere +// cluster. +// +// For more information about RDS on VMware, see the RDS on VMware User Guide. +// (https://docs.aws.amazon.com/AmazonRDS/latest/RDSonVMwareUserGuide/rds-on-vmware.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Relational Database Service's +// API operation DeleteCustomAvailabilityZone for usage and error information. 
+// +// Returned Error Codes: +// * ErrCodeCustomAvailabilityZoneNotFoundFault "CustomAvailabilityZoneNotFound" +// CustomAvailabilityZoneId doesn't refer to an existing custom Availability +// Zone identifier. +// +// * ErrCodeKMSKeyNotAccessibleFault "KMSKeyNotAccessibleFault" +// An error occurred accessing an AWS KMS key. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/DeleteCustomAvailabilityZone +func (c *RDS) DeleteCustomAvailabilityZone(input *DeleteCustomAvailabilityZoneInput) (*DeleteCustomAvailabilityZoneOutput, error) { + req, out := c.DeleteCustomAvailabilityZoneRequest(input) + return out, req.Send() +} + +// DeleteCustomAvailabilityZoneWithContext is the same as DeleteCustomAvailabilityZone with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteCustomAvailabilityZone for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *RDS) DeleteCustomAvailabilityZoneWithContext(ctx aws.Context, input *DeleteCustomAvailabilityZoneInput, opts ...request.Option) (*DeleteCustomAvailabilityZoneOutput, error) { + req, out := c.DeleteCustomAvailabilityZoneRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteDBCluster = "DeleteDBCluster" // DeleteDBClusterRequest generates a "aws/request.Request" representing the @@ -3654,6 +3835,86 @@ func (c *RDS) DeleteGlobalClusterWithContext(ctx aws.Context, input *DeleteGloba return out, req.Send() } +const opDeleteInstallationMedia = "DeleteInstallationMedia" + +// DeleteInstallationMediaRequest generates a "aws/request.Request" representing the +// client's request for the DeleteInstallationMedia operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteInstallationMedia for more information on using the DeleteInstallationMedia +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteInstallationMediaRequest method. +// req, resp := client.DeleteInstallationMediaRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/DeleteInstallationMedia +func (c *RDS) DeleteInstallationMediaRequest(input *DeleteInstallationMediaInput) (req *request.Request, output *DeleteInstallationMediaOutput) { + op := &request.Operation{ + Name: opDeleteInstallationMedia, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteInstallationMediaInput{} + } + + output = &DeleteInstallationMediaOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteInstallationMedia API operation for Amazon Relational Database Service. +// +// Deletes the installation media for an on-premises, bring your own media (BYOM) +// DB engine, such as Microsoft SQL Server. 
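Taken together, the new CreateCustomAvailabilityZone and DeleteCustomAvailabilityZone operations cover the custom AZ lifecycle for RDS on VMware. A hedged sketch of creating a custom AZ and later deleting it, using the awserr type assertion the comments above recommend to single out the CustomAvailabilityZoneNotFound code; the AZ name, tunnel name, and originator IP are placeholder values:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/awserr"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/rds"
    )

    func main() {
        sess := session.Must(session.NewSession())
        svc := rds.New(sess, aws.NewConfig().WithRegion("us-east-1"))

        // NewVpnTunnelName and VpnTunnelOriginatorIP are only used when
        // ExistingVpnId is not supplied; all values here are placeholders.
        created, err := svc.CreateCustomAvailabilityZone(&rds.CreateCustomAvailabilityZoneInput{
            CustomAvailabilityZoneName: aws.String("example-custom-az"),
            NewVpnTunnelName:           aws.String("example-vpn-tunnel"),
            VpnTunnelOriginatorIP:      aws.String("203.0.113.10"),
        })
        if err != nil {
            log.Fatal(err)
        }
        azID := aws.StringValue(created.CustomAvailabilityZone.CustomAvailabilityZoneId)

        // Delete it later, surfacing the NotFound fault distinctly via awserr.
        if _, err := svc.DeleteCustomAvailabilityZone(&rds.DeleteCustomAvailabilityZoneInput{
            CustomAvailabilityZoneId: aws.String(azID),
        }); err != nil {
            if aerr, ok := err.(awserr.Error); ok && aerr.Code() == rds.ErrCodeCustomAvailabilityZoneNotFoundFault {
                fmt.Println("custom AZ already gone:", aerr.Message())
                return
            }
            log.Fatal(err)
        }
        fmt.Println("deleted custom AZ", azID)
    }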
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Relational Database Service's +// API operation DeleteInstallationMedia for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInstallationMediaNotFoundFault "InstallationMediaNotFound" +// InstallationMediaID doesn't refer to an existing installation media. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/DeleteInstallationMedia +func (c *RDS) DeleteInstallationMedia(input *DeleteInstallationMediaInput) (*DeleteInstallationMediaOutput, error) { + req, out := c.DeleteInstallationMediaRequest(input) + return out, req.Send() +} + +// DeleteInstallationMediaWithContext is the same as DeleteInstallationMedia with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteInstallationMedia for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *RDS) DeleteInstallationMediaWithContext(ctx aws.Context, input *DeleteInstallationMediaInput, opts ...request.Option) (*DeleteInstallationMediaOutput, error) { + req, out := c.DeleteInstallationMediaRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteOptionGroup = "DeleteOptionGroup" // DeleteOptionGroupRequest generates a "aws/request.Request" representing the @@ -3895,6 +4156,148 @@ func (c *RDS) DescribeCertificatesWithContext(ctx aws.Context, input *DescribeCe return out, req.Send() } +const opDescribeCustomAvailabilityZones = "DescribeCustomAvailabilityZones" + +// DescribeCustomAvailabilityZonesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeCustomAvailabilityZones operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeCustomAvailabilityZones for more information on using the DescribeCustomAvailabilityZones +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeCustomAvailabilityZonesRequest method. 
+// req, resp := client.DescribeCustomAvailabilityZonesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/DescribeCustomAvailabilityZones +func (c *RDS) DescribeCustomAvailabilityZonesRequest(input *DescribeCustomAvailabilityZonesInput) (req *request.Request, output *DescribeCustomAvailabilityZonesOutput) { + op := &request.Operation{ + Name: opDescribeCustomAvailabilityZones, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeCustomAvailabilityZonesInput{} + } + + output = &DescribeCustomAvailabilityZonesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeCustomAvailabilityZones API operation for Amazon Relational Database Service. +// +// Returns information about custom Availability Zones (AZs). +// +// A custom AZ is an on-premises AZ that is integrated with a VMware vSphere +// cluster. +// +// For more information about RDS on VMware, see the RDS on VMware User Guide. +// (https://docs.aws.amazon.com/AmazonRDS/latest/RDSonVMwareUserGuide/rds-on-vmware.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Relational Database Service's +// API operation DescribeCustomAvailabilityZones for usage and error information. +// +// Returned Error Codes: +// * ErrCodeCustomAvailabilityZoneNotFoundFault "CustomAvailabilityZoneNotFound" +// CustomAvailabilityZoneId doesn't refer to an existing custom Availability +// Zone identifier. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/DescribeCustomAvailabilityZones +func (c *RDS) DescribeCustomAvailabilityZones(input *DescribeCustomAvailabilityZonesInput) (*DescribeCustomAvailabilityZonesOutput, error) { + req, out := c.DescribeCustomAvailabilityZonesRequest(input) + return out, req.Send() +} + +// DescribeCustomAvailabilityZonesWithContext is the same as DescribeCustomAvailabilityZones with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeCustomAvailabilityZones for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *RDS) DescribeCustomAvailabilityZonesWithContext(ctx aws.Context, input *DescribeCustomAvailabilityZonesInput, opts ...request.Option) (*DescribeCustomAvailabilityZonesOutput, error) { + req, out := c.DescribeCustomAvailabilityZonesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeCustomAvailabilityZonesPages iterates over the pages of a DescribeCustomAvailabilityZones operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeCustomAvailabilityZones method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. 
+// +// // Example iterating over at most 3 pages of a DescribeCustomAvailabilityZones operation. +// pageNum := 0 +// err := client.DescribeCustomAvailabilityZonesPages(params, +// func(page *rds.DescribeCustomAvailabilityZonesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *RDS) DescribeCustomAvailabilityZonesPages(input *DescribeCustomAvailabilityZonesInput, fn func(*DescribeCustomAvailabilityZonesOutput, bool) bool) error { + return c.DescribeCustomAvailabilityZonesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeCustomAvailabilityZonesPagesWithContext same as DescribeCustomAvailabilityZonesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *RDS) DescribeCustomAvailabilityZonesPagesWithContext(ctx aws.Context, input *DescribeCustomAvailabilityZonesInput, fn func(*DescribeCustomAvailabilityZonesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeCustomAvailabilityZonesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeCustomAvailabilityZonesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*DescribeCustomAvailabilityZonesOutput), !p.HasNextPage()) + } + return p.Err() +} + const opDescribeDBClusterBacktracks = "DescribeDBClusterBacktracks" // DescribeDBClusterBacktracksRequest generates a "aws/request.Request" representing the @@ -6564,6 +6967,142 @@ func (c *RDS) DescribeGlobalClustersPagesWithContext(ctx aws.Context, input *Des return p.Err() } +const opDescribeInstallationMedia = "DescribeInstallationMedia" + +// DescribeInstallationMediaRequest generates a "aws/request.Request" representing the +// client's request for the DescribeInstallationMedia operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeInstallationMedia for more information on using the DescribeInstallationMedia +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeInstallationMediaRequest method. 
+// req, resp := client.DescribeInstallationMediaRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/DescribeInstallationMedia +func (c *RDS) DescribeInstallationMediaRequest(input *DescribeInstallationMediaInput) (req *request.Request, output *DescribeInstallationMediaOutput) { + op := &request.Operation{ + Name: opDescribeInstallationMedia, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeInstallationMediaInput{} + } + + output = &DescribeInstallationMediaOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeInstallationMedia API operation for Amazon Relational Database Service. +// +// Describes the available installation media for on-premises, bring your own +// media (BYOM) DB engines, such as Microsoft SQL Server. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Relational Database Service's +// API operation DescribeInstallationMedia for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInstallationMediaNotFoundFault "InstallationMediaNotFound" +// InstallationMediaID doesn't refer to an existing installation media. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/DescribeInstallationMedia +func (c *RDS) DescribeInstallationMedia(input *DescribeInstallationMediaInput) (*DescribeInstallationMediaOutput, error) { + req, out := c.DescribeInstallationMediaRequest(input) + return out, req.Send() +} + +// DescribeInstallationMediaWithContext is the same as DescribeInstallationMedia with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeInstallationMedia for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *RDS) DescribeInstallationMediaWithContext(ctx aws.Context, input *DescribeInstallationMediaInput, opts ...request.Option) (*DescribeInstallationMediaOutput, error) { + req, out := c.DescribeInstallationMediaRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeInstallationMediaPages iterates over the pages of a DescribeInstallationMedia operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeInstallationMedia method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeInstallationMedia operation. 
+// pageNum := 0 +// err := client.DescribeInstallationMediaPages(params, +// func(page *rds.DescribeInstallationMediaOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *RDS) DescribeInstallationMediaPages(input *DescribeInstallationMediaInput, fn func(*DescribeInstallationMediaOutput, bool) bool) error { + return c.DescribeInstallationMediaPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeInstallationMediaPagesWithContext same as DescribeInstallationMediaPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *RDS) DescribeInstallationMediaPagesWithContext(ctx aws.Context, input *DescribeInstallationMediaInput, fn func(*DescribeInstallationMediaOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeInstallationMediaInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeInstallationMediaRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*DescribeInstallationMediaOutput), !p.HasNextPage()) + } + return p.Err() +} + const opDescribeOptionGroupOptions = "DescribeOptionGroupOptions" // DescribeOptionGroupOptionsRequest generates a "aws/request.Request" representing the @@ -7708,6 +8247,90 @@ func (c *RDS) FailoverDBClusterWithContext(ctx aws.Context, input *FailoverDBClu return out, req.Send() } +const opImportInstallationMedia = "ImportInstallationMedia" + +// ImportInstallationMediaRequest generates a "aws/request.Request" representing the +// client's request for the ImportInstallationMedia operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ImportInstallationMedia for more information on using the ImportInstallationMedia +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ImportInstallationMediaRequest method. +// req, resp := client.ImportInstallationMediaRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/ImportInstallationMedia +func (c *RDS) ImportInstallationMediaRequest(input *ImportInstallationMediaInput) (req *request.Request, output *ImportInstallationMediaOutput) { + op := &request.Operation{ + Name: opImportInstallationMedia, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ImportInstallationMediaInput{} + } + + output = &ImportInstallationMediaOutput{} + req = c.newRequest(op, input, output) + return +} + +// ImportInstallationMedia API operation for Amazon Relational Database Service. 
+// +// Imports the installation media for an on-premises, bring your own media (BYOM) +// DB engine, such as SQL Server. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Relational Database Service's +// API operation ImportInstallationMedia for usage and error information. +// +// Returned Error Codes: +// * ErrCodeCustomAvailabilityZoneNotFoundFault "CustomAvailabilityZoneNotFound" +// CustomAvailabilityZoneId doesn't refer to an existing custom Availability +// Zone identifier. +// +// * ErrCodeInstallationMediaAlreadyExistsFault "InstallationMediaAlreadyExists" +// The specified installation media has already been imported. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/ImportInstallationMedia +func (c *RDS) ImportInstallationMedia(input *ImportInstallationMediaInput) (*ImportInstallationMediaOutput, error) { + req, out := c.ImportInstallationMediaRequest(input) + return out, req.Send() +} + +// ImportInstallationMediaWithContext is the same as ImportInstallationMedia with the addition of +// the ability to pass a context and additional request options. +// +// See ImportInstallationMedia for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *RDS) ImportInstallationMediaWithContext(ctx aws.Context, input *ImportInstallationMediaInput, opts ...request.Option) (*ImportInstallationMediaOutput, error) { + req, out := c.ImportInstallationMediaRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opListTagsForResource = "ListTagsForResource" // ListTagsForResourceRequest generates a "aws/request.Request" representing the @@ -8430,11 +9053,11 @@ func (c *RDS) ModifyDBInstanceRequest(input *ModifyDBInstanceInput) (req *reques // Storage of the StorageType specified can't be associated with the DB instance. // // * ErrCodeAuthorizationNotFoundFault "AuthorizationNotFound" -// The specified CIDRIP or Amazon EC2 security group isn't authorized for the -// specified DB security group. +// The specified CIDR IP range or Amazon EC2 security group might not be authorized +// for the specified DB security group. // -// RDS also may not be authorized by using IAM to perform necessary actions -// on your behalf. +// Or, RDS might not be authorized to perform necessary actions using IAM on +// your behalf. // // * ErrCodeCertificateNotFoundFault "CertificateNotFound" // CertificateIdentifier doesn't refer to an existing certificate. @@ -9710,7 +10333,7 @@ func (c *RDS) RemoveRoleFromDBInstanceRequest(input *RemoveRoleFromDBInstanceInp // DBInstanceIdentifier doesn't refer to an existing DB instance. // // * ErrCodeDBInstanceRoleNotFoundFault "DBInstanceRoleNotFound" -// The specified RoleArn value doesn't match the specifed feature for the DB +// The specified RoleArn value doesn't match the specified feature for the DB // instance. 
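ImportInstallationMediaInput is not shown in this hunk. Assuming its fields mirror the media fields that appear on the installation-media output type further below (CustomAvailabilityZoneId, Engine, EngineVersion, EngineInstallationMediaPath, OSInstallationMediaPath), a usage sketch could look like the fragment below, reusing the rds client svc and imports from the earlier custom AZ sketch; treat the field names, engine version, and media paths as assumptions:

    // Field names below are assumed from the output fields, not confirmed here.
    media, err := svc.ImportInstallationMedia(&rds.ImportInstallationMediaInput{
        CustomAvailabilityZoneId:    aws.String("example-custom-az-id"),                  // placeholder ID
        Engine:                      aws.String("sqlserver-ee"),
        EngineVersion:               aws.String("13.00.5292.0.v1"),                       // placeholder version
        EngineInstallationMediaPath: aws.String("SQLServerISO/en_sql_server_2016.iso"),   // placeholder path
        OSInstallationMediaPath:     aws.String("WindowsISO/en_windows_server_2016.iso"), // placeholder path
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(media)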
// // * ErrCodeInvalidDBInstanceStateFault "InvalidDBInstanceState" @@ -10661,11 +11284,11 @@ func (c *RDS) RestoreDBInstanceFromDBSnapshotRequest(input *RestoreDBInstanceFro // Storage of the StorageType specified can't be associated with the DB instance. // // * ErrCodeAuthorizationNotFoundFault "AuthorizationNotFound" -// The specified CIDRIP or Amazon EC2 security group isn't authorized for the -// specified DB security group. +// The specified CIDR IP range or Amazon EC2 security group might not be authorized +// for the specified DB security group. // -// RDS also may not be authorized by using IAM to perform necessary actions -// on your behalf. +// Or, RDS might not be authorized to perform necessary actions using IAM on +// your behalf. // // * ErrCodeKMSKeyNotAccessibleFault "KMSKeyNotAccessibleFault" // An error occurred accessing an AWS KMS key. @@ -10813,11 +11436,11 @@ func (c *RDS) RestoreDBInstanceFromS3Request(input *RestoreDBInstanceFromS3Input // Storage of the StorageType specified can't be associated with the DB instance. // // * ErrCodeAuthorizationNotFoundFault "AuthorizationNotFound" -// The specified CIDRIP or Amazon EC2 security group isn't authorized for the -// specified DB security group. +// The specified CIDR IP range or Amazon EC2 security group might not be authorized +// for the specified DB security group. // -// RDS also may not be authorized by using IAM to perform necessary actions -// on your behalf. +// Or, RDS might not be authorized to perform necessary actions using IAM on +// your behalf. // // * ErrCodeKMSKeyNotAccessibleFault "KMSKeyNotAccessibleFault" // An error occurred accessing an AWS KMS key. @@ -10966,11 +11589,11 @@ func (c *RDS) RestoreDBInstanceToPointInTimeRequest(input *RestoreDBInstanceToPo // Storage of the StorageType specified can't be associated with the DB instance. // // * ErrCodeAuthorizationNotFoundFault "AuthorizationNotFound" -// The specified CIDRIP or Amazon EC2 security group isn't authorized for the -// specified DB security group. +// The specified CIDR IP range or Amazon EC2 security group might not be authorized +// for the specified DB security group. // -// RDS also may not be authorized by using IAM to perform necessary actions -// on your behalf. +// Or, RDS might not be authorized to perform necessary actions using IAM on +// your behalf. // // * ErrCodeKMSKeyNotAccessibleFault "KMSKeyNotAccessibleFault" // An error occurred accessing an AWS KMS key. @@ -11072,11 +11695,11 @@ func (c *RDS) RevokeDBSecurityGroupIngressRequest(input *RevokeDBSecurityGroupIn // DBSecurityGroupName doesn't refer to an existing DB security group. // // * ErrCodeAuthorizationNotFoundFault "AuthorizationNotFound" -// The specified CIDRIP or Amazon EC2 security group isn't authorized for the -// specified DB security group. +// The specified CIDR IP range or Amazon EC2 security group might not be authorized +// for the specified DB security group. // -// RDS also may not be authorized by using IAM to perform necessary actions -// on your behalf. +// Or, RDS might not be authorized to perform necessary actions using IAM on +// your behalf. // // * ErrCodeInvalidDBSecurityGroupStateFault "InvalidDBSecurityGroupState" // The state of the DB security group doesn't allow deletion. @@ -11384,11 +12007,11 @@ func (c *RDS) StartDBInstanceRequest(input *StartDBInstanceInput) (req *request. // DBClusterIdentifier doesn't refer to an existing DB cluster. 
// // * ErrCodeAuthorizationNotFoundFault "AuthorizationNotFound" -// The specified CIDRIP or Amazon EC2 security group isn't authorized for the -// specified DB security group. +// The specified CIDR IP range or Amazon EC2 security group might not be authorized +// for the specified DB security group. // -// RDS also may not be authorized by using IAM to perform necessary actions -// on your behalf. +// Or, RDS might not be authorized to perform necessary actions using IAM on +// your behalf. // // * ErrCodeKMSKeyNotAccessibleFault "KMSKeyNotAccessibleFault" // An error occurred accessing an AWS KMS key. @@ -13572,6 +14195,105 @@ func (s *CopyOptionGroupOutput) SetOptionGroup(v *OptionGroup) *CopyOptionGroupO return s } +type CreateCustomAvailabilityZoneInput struct { + _ struct{} `type:"structure"` + + // The name of the custom Availability Zone (AZ). + // + // CustomAvailabilityZoneName is a required field + CustomAvailabilityZoneName *string `type:"string" required:"true"` + + // The ID of an existing virtual private network (VPN) between the Amazon RDS + // website and the VMware vSphere cluster. + ExistingVpnId *string `type:"string"` + + // The name of a new VPN tunnel between the Amazon RDS website and the VMware + // vSphere cluster. + // + // Specify this parameter only if ExistingVpnId is not specified. + NewVpnTunnelName *string `type:"string"` + + // The IP address of network traffic from your on-premises data center. A custom + // AZ receives the network traffic. + // + // Specify this parameter only if ExistingVpnId is not specified. + VpnTunnelOriginatorIP *string `type:"string"` +} + +// String returns the string representation +func (s CreateCustomAvailabilityZoneInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCustomAvailabilityZoneInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateCustomAvailabilityZoneInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateCustomAvailabilityZoneInput"} + if s.CustomAvailabilityZoneName == nil { + invalidParams.Add(request.NewErrParamRequired("CustomAvailabilityZoneName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCustomAvailabilityZoneName sets the CustomAvailabilityZoneName field's value. +func (s *CreateCustomAvailabilityZoneInput) SetCustomAvailabilityZoneName(v string) *CreateCustomAvailabilityZoneInput { + s.CustomAvailabilityZoneName = &v + return s +} + +// SetExistingVpnId sets the ExistingVpnId field's value. +func (s *CreateCustomAvailabilityZoneInput) SetExistingVpnId(v string) *CreateCustomAvailabilityZoneInput { + s.ExistingVpnId = &v + return s +} + +// SetNewVpnTunnelName sets the NewVpnTunnelName field's value. +func (s *CreateCustomAvailabilityZoneInput) SetNewVpnTunnelName(v string) *CreateCustomAvailabilityZoneInput { + s.NewVpnTunnelName = &v + return s +} + +// SetVpnTunnelOriginatorIP sets the VpnTunnelOriginatorIP field's value. +func (s *CreateCustomAvailabilityZoneInput) SetVpnTunnelOriginatorIP(v string) *CreateCustomAvailabilityZoneInput { + s.VpnTunnelOriginatorIP = &v + return s +} + +type CreateCustomAvailabilityZoneOutput struct { + _ struct{} `type:"structure"` + + // A custom Availability Zone (AZ) is an on-premises AZ that is integrated with + // a VMware vSphere cluster. + // + // For more information about RDS on VMware, see the RDS on VMware User Guide. 
+ // (https://docs.aws.amazon.com/AmazonRDS/latest/RDSonVMwareUserGuide/rds-on-vmware.html) + CustomAvailabilityZone *CustomAvailabilityZone `type:"structure"` +} + +// String returns the string representation +func (s CreateCustomAvailabilityZoneOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCustomAvailabilityZoneOutput) GoString() string { + return s.String() +} + +// SetCustomAvailabilityZone sets the CustomAvailabilityZone field's value. +func (s *CreateCustomAvailabilityZoneOutput) SetCustomAvailabilityZone(v *CustomAvailabilityZone) *CreateCustomAvailabilityZoneOutput { + s.CustomAvailabilityZone = v + return s +} + type CreateDBClusterEndpointInput struct { _ struct{} `type:"structure"` @@ -14628,6 +15350,13 @@ type CreateDBInstanceInput struct { // Constraint: The AvailabilityZone parameter can't be specified if the DB instance // is a Multi-AZ deployment. The specified Availability Zone must be in the // same AWS Region as the current endpoint. + // + // If you're creating a DB instance in an RDS on VMware environment, specify + // the identifier of the custom Availability Zone to create the DB instance + // in. + // + // For more information about RDS on VMware, see the RDS on VMware User Guide. + // (https://docs.aws.amazon.com/AmazonRDS/latest/RDSonVMwareUserGuide/rds-on-vmware.html) AvailabilityZone *string `type:"string"` // The number of days for which automated backups are retained. Setting this @@ -16968,6 +17697,64 @@ func (s *CreateOptionGroupOutput) SetOptionGroup(v *OptionGroup) *CreateOptionGr return s } +// A custom Availability Zone (AZ) is an on-premises AZ that is integrated with +// a VMware vSphere cluster. +// +// For more information about RDS on VMware, see the RDS on VMware User Guide. +// (https://docs.aws.amazon.com/AmazonRDS/latest/RDSonVMwareUserGuide/rds-on-vmware.html) +type CustomAvailabilityZone struct { + _ struct{} `type:"structure"` + + // The identifier of the custom AZ. + // + // Amazon RDS generates a unique identifier when a custom AZ is created. + CustomAvailabilityZoneId *string `type:"string"` + + // The name of the custom AZ. + CustomAvailabilityZoneName *string `type:"string"` + + // The status of the custom AZ. + CustomAvailabilityZoneStatus *string `type:"string"` + + // Information about the virtual private network (VPN) between the VMware vSphere + // cluster and the AWS website. + VpnDetails *VpnDetails `type:"structure"` +} + +// String returns the string representation +func (s CustomAvailabilityZone) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CustomAvailabilityZone) GoString() string { + return s.String() +} + +// SetCustomAvailabilityZoneId sets the CustomAvailabilityZoneId field's value. +func (s *CustomAvailabilityZone) SetCustomAvailabilityZoneId(v string) *CustomAvailabilityZone { + s.CustomAvailabilityZoneId = &v + return s +} + +// SetCustomAvailabilityZoneName sets the CustomAvailabilityZoneName field's value. +func (s *CustomAvailabilityZone) SetCustomAvailabilityZoneName(v string) *CustomAvailabilityZone { + s.CustomAvailabilityZoneName = &v + return s +} + +// SetCustomAvailabilityZoneStatus sets the CustomAvailabilityZoneStatus field's value. +func (s *CustomAvailabilityZone) SetCustomAvailabilityZoneStatus(v string) *CustomAvailabilityZone { + s.CustomAvailabilityZoneStatus = &v + return s +} + +// SetVpnDetails sets the VpnDetails field's value. 
+func (s *CustomAvailabilityZone) SetVpnDetails(v *VpnDetails) *CustomAvailabilityZone { + s.VpnDetails = v + return s +} + // Contains the details of an Amazon Aurora DB cluster. // // This data type is used as a response element in the DescribeDBClusters, StopDBCluster, @@ -19980,6 +20767,71 @@ func (s *DBSubnetGroup) SetVpcId(v string) *DBSubnetGroup { return s } +type DeleteCustomAvailabilityZoneInput struct { + _ struct{} `type:"structure"` + + // The custom AZ identifier. + // + // CustomAvailabilityZoneId is a required field + CustomAvailabilityZoneId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteCustomAvailabilityZoneInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCustomAvailabilityZoneInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteCustomAvailabilityZoneInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteCustomAvailabilityZoneInput"} + if s.CustomAvailabilityZoneId == nil { + invalidParams.Add(request.NewErrParamRequired("CustomAvailabilityZoneId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCustomAvailabilityZoneId sets the CustomAvailabilityZoneId field's value. +func (s *DeleteCustomAvailabilityZoneInput) SetCustomAvailabilityZoneId(v string) *DeleteCustomAvailabilityZoneInput { + s.CustomAvailabilityZoneId = &v + return s +} + +type DeleteCustomAvailabilityZoneOutput struct { + _ struct{} `type:"structure"` + + // A custom Availability Zone (AZ) is an on-premises AZ that is integrated with + // a VMware vSphere cluster. + // + // For more information about RDS on VMware, see the RDS on VMware User Guide. + // (https://docs.aws.amazon.com/AmazonRDS/latest/RDSonVMwareUserGuide/rds-on-vmware.html) + CustomAvailabilityZone *CustomAvailabilityZone `type:"structure"` +} + +// String returns the string representation +func (s DeleteCustomAvailabilityZoneOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCustomAvailabilityZoneOutput) GoString() string { + return s.String() +} + +// SetCustomAvailabilityZone sets the CustomAvailabilityZone field's value. +func (s *DeleteCustomAvailabilityZoneOutput) SetCustomAvailabilityZone(v *CustomAvailabilityZone) *DeleteCustomAvailabilityZoneOutput { + s.CustomAvailabilityZone = v + return s +} + type DeleteDBClusterEndpointInput struct { _ struct{} `type:"structure"` @@ -20939,6 +21791,133 @@ func (s *DeleteGlobalClusterOutput) SetGlobalCluster(v *GlobalCluster) *DeleteGl return s } +type DeleteInstallationMediaInput struct { + _ struct{} `type:"structure"` + + // The installation media ID. + // + // InstallationMediaId is a required field + InstallationMediaId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteInstallationMediaInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteInstallationMediaInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteInstallationMediaInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteInstallationMediaInput"} + if s.InstallationMediaId == nil { + invalidParams.Add(request.NewErrParamRequired("InstallationMediaId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInstallationMediaId sets the InstallationMediaId field's value. +func (s *DeleteInstallationMediaInput) SetInstallationMediaId(v string) *DeleteInstallationMediaInput { + s.InstallationMediaId = &v + return s +} + +// Contains the installation media for on-premises, bring your own media (BYOM) +// DB engines, such as Microsoft SQL Server. +type DeleteInstallationMediaOutput struct { + _ struct{} `type:"structure"` + + // The custom Availability Zone (AZ) that contains the installation media. + CustomAvailabilityZoneId *string `type:"string"` + + // The DB engine. + Engine *string `type:"string"` + + // The path to the installation media for the DB engine. + EngineInstallationMediaPath *string `type:"string"` + + // The engine version of the DB engine. + EngineVersion *string `type:"string"` + + // If an installation media failure occurred, the cause of the failure. + FailureCause *InstallationMediaFailureCause `type:"structure"` + + // The installation media ID. + InstallationMediaId *string `type:"string"` + + // The path to the installation media for the operating system associated with + // the DB engine. + OSInstallationMediaPath *string `type:"string"` + + // The status of the installation media. + Status *string `type:"string"` +} + +// String returns the string representation +func (s DeleteInstallationMediaOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteInstallationMediaOutput) GoString() string { + return s.String() +} + +// SetCustomAvailabilityZoneId sets the CustomAvailabilityZoneId field's value. +func (s *DeleteInstallationMediaOutput) SetCustomAvailabilityZoneId(v string) *DeleteInstallationMediaOutput { + s.CustomAvailabilityZoneId = &v + return s +} + +// SetEngine sets the Engine field's value. +func (s *DeleteInstallationMediaOutput) SetEngine(v string) *DeleteInstallationMediaOutput { + s.Engine = &v + return s +} + +// SetEngineInstallationMediaPath sets the EngineInstallationMediaPath field's value. +func (s *DeleteInstallationMediaOutput) SetEngineInstallationMediaPath(v string) *DeleteInstallationMediaOutput { + s.EngineInstallationMediaPath = &v + return s +} + +// SetEngineVersion sets the EngineVersion field's value. +func (s *DeleteInstallationMediaOutput) SetEngineVersion(v string) *DeleteInstallationMediaOutput { + s.EngineVersion = &v + return s +} + +// SetFailureCause sets the FailureCause field's value. +func (s *DeleteInstallationMediaOutput) SetFailureCause(v *InstallationMediaFailureCause) *DeleteInstallationMediaOutput { + s.FailureCause = v + return s +} + +// SetInstallationMediaId sets the InstallationMediaId field's value. +func (s *DeleteInstallationMediaOutput) SetInstallationMediaId(v string) *DeleteInstallationMediaOutput { + s.InstallationMediaId = &v + return s +} + +// SetOSInstallationMediaPath sets the OSInstallationMediaPath field's value. +func (s *DeleteInstallationMediaOutput) SetOSInstallationMediaPath(v string) *DeleteInstallationMediaOutput { + s.OSInstallationMediaPath = &v + return s +} + +// SetStatus sets the Status field's value. 
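DeleteInstallationMediaInput above needs only the InstallationMediaId. A short fragment, again reusing the rds client svc from the earlier sketch, that deletes a media item and distinguishes the InstallationMediaNotFound code via awserr; the media ID is a placeholder:

    out, err := svc.DeleteInstallationMedia(&rds.DeleteInstallationMediaInput{
        InstallationMediaId: aws.String("example-media-id"), // placeholder ID
    })
    if err != nil {
        if aerr, ok := err.(awserr.Error); ok && aerr.Code() == rds.ErrCodeInstallationMediaNotFoundFault {
            fmt.Println("installation media not found:", aerr.Message())
        } else {
            log.Fatal(err)
        }
    } else {
        // The output echoes the media's engine, path, and status fields.
        fmt.Println(aws.StringValue(out.Status))
    }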
+func (s *DeleteInstallationMediaOutput) SetStatus(v string) *DeleteInstallationMediaOutput { + s.Status = &v + return s +} + type DeleteOptionGroupInput struct { _ struct{} `type:"structure"` @@ -21054,7 +22033,7 @@ type DescribeCertificatesInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -21151,6 +22130,119 @@ func (s *DescribeCertificatesOutput) SetMarker(v string) *DescribeCertificatesOu return s } +type DescribeCustomAvailabilityZonesInput struct { + _ struct{} `type:"structure"` + + // The custom AZ identifier. If this parameter is specified, information from + // only the specific custom AZ is returned. + CustomAvailabilityZoneId *string `type:"string"` + + // A filter that specifies one or more custom AZs to describe. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous DescribeCustomAvailabilityZones + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that you can retrieve the remaining results. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeCustomAvailabilityZonesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCustomAvailabilityZonesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeCustomAvailabilityZonesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeCustomAvailabilityZonesInput"} + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCustomAvailabilityZoneId sets the CustomAvailabilityZoneId field's value. +func (s *DescribeCustomAvailabilityZonesInput) SetCustomAvailabilityZoneId(v string) *DescribeCustomAvailabilityZonesInput { + s.CustomAvailabilityZoneId = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeCustomAvailabilityZonesInput) SetFilters(v []*Filter) *DescribeCustomAvailabilityZonesInput { + s.Filters = v + return s +} + +// SetMarker sets the Marker field's value. +func (s *DescribeCustomAvailabilityZonesInput) SetMarker(v string) *DescribeCustomAvailabilityZonesInput { + s.Marker = &v + return s +} + +// SetMaxRecords sets the MaxRecords field's value. +func (s *DescribeCustomAvailabilityZonesInput) SetMaxRecords(v int64) *DescribeCustomAvailabilityZonesInput { + s.MaxRecords = &v + return s +} + +type DescribeCustomAvailabilityZonesOutput struct { + _ struct{} `type:"structure"` + + // The list of CustomAvailabilityZone objects for the AWS account. 
+ CustomAvailabilityZones []*CustomAvailabilityZone `locationNameList:"CustomAvailabilityZone" type:"list"` + + // An optional pagination token provided by a previous DescribeCustomAvailabilityZones + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeCustomAvailabilityZonesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCustomAvailabilityZonesOutput) GoString() string { + return s.String() +} + +// SetCustomAvailabilityZones sets the CustomAvailabilityZones field's value. +func (s *DescribeCustomAvailabilityZonesOutput) SetCustomAvailabilityZones(v []*CustomAvailabilityZone) *DescribeCustomAvailabilityZonesOutput { + s.CustomAvailabilityZones = v + return s +} + +// SetMarker sets the Marker field's value. +func (s *DescribeCustomAvailabilityZonesOutput) SetMarker(v string) *DescribeCustomAvailabilityZonesOutput { + s.Marker = &v + return s +} + type DescribeDBClusterBacktracksInput struct { _ struct{} `type:"structure"` @@ -21201,7 +22293,7 @@ type DescribeDBClusterBacktracksInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -21334,7 +22426,7 @@ type DescribeDBClusterEndpointsInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -21457,7 +22549,7 @@ type DescribeDBClusterParameterGroupsInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -21576,7 +22668,7 @@ type DescribeDBClusterParametersInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -21814,7 +22906,7 @@ type DescribeDBClusterSnapshotsInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -21994,7 +23086,7 @@ type DescribeDBClustersInput struct { // The maximum number of records to include in the response. 
If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -22148,7 +23240,7 @@ type DescribeDBEngineVersionsInput struct { // The maximum number of records to include in the response. If more than the // MaxRecords value is available, a pagination token called a marker is included - // in the response so that the following results can be retrieved. + // in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -22322,7 +23414,7 @@ type DescribeDBInstanceAutomatedBackupsInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. MaxRecords *int64 `type:"integer"` } @@ -22465,7 +23557,7 @@ type DescribeDBInstancesInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -22639,7 +23731,7 @@ type DescribeDBLogFilesInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. MaxRecords *int64 `type:"integer"` } @@ -22771,7 +23863,7 @@ type DescribeDBParameterGroupsInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -22891,7 +23983,7 @@ type DescribeDBParametersInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -23021,7 +24113,7 @@ type DescribeDBSecurityGroupsInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -23251,7 +24343,7 @@ type DescribeDBSnapshotsInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. 
// // Default: 100 // @@ -23425,7 +24517,7 @@ type DescribeDBSubnetGroupsInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -23542,7 +24634,7 @@ type DescribeEngineDefaultClusterParametersInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -23649,7 +24741,7 @@ type DescribeEngineDefaultParametersInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -23829,7 +24921,7 @@ type DescribeEventSubscriptionsInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -23958,7 +25050,7 @@ type DescribeEventsInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -24145,7 +25237,7 @@ type DescribeGlobalClustersInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -24241,6 +25333,123 @@ func (s *DescribeGlobalClustersOutput) SetMarker(v string) *DescribeGlobalCluste return s } +type DescribeInstallationMediaInput struct { + _ struct{} `type:"structure"` + + // A filter that specifies one or more installation media to describe. Supported + // filters include the following: + // + // * custom-availability-zone-id - Accepts custom Availability Zone (AZ) + // identifiers. The results list includes information about only the custom + // AZs identified by these identifiers. + // + // * engine - Accepts database engines. The results list includes information + // about only the database engines identified by these identifiers. For more + // information about the valid engines for installation media, see ImportInstallationMedia. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // The installation media ID. + InstallationMediaId *string `type:"string"` + + // An optional pagination token provided by a previous request. 
If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. + Marker *string `type:"string"` + + // An optional pagination token provided by a previous DescribeInstallationMedia + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeInstallationMediaInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstallationMediaInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeInstallationMediaInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeInstallationMediaInput"} + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilters sets the Filters field's value. +func (s *DescribeInstallationMediaInput) SetFilters(v []*Filter) *DescribeInstallationMediaInput { + s.Filters = v + return s +} + +// SetInstallationMediaId sets the InstallationMediaId field's value. +func (s *DescribeInstallationMediaInput) SetInstallationMediaId(v string) *DescribeInstallationMediaInput { + s.InstallationMediaId = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *DescribeInstallationMediaInput) SetMarker(v string) *DescribeInstallationMediaInput { + s.Marker = &v + return s +} + +// SetMaxRecords sets the MaxRecords field's value. +func (s *DescribeInstallationMediaInput) SetMaxRecords(v int64) *DescribeInstallationMediaInput { + s.MaxRecords = &v + return s +} + +type DescribeInstallationMediaOutput struct { + _ struct{} `type:"structure"` + + // The list of InstallationMedia objects for the AWS account. + InstallationMedia []*InstallationMedia `locationNameList:"InstallationMedia" type:"list"` + + // An optional pagination token provided by a previous DescribeInstallationMedia + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeInstallationMediaOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstallationMediaOutput) GoString() string { + return s.String() +} + +// SetInstallationMedia sets the InstallationMedia field's value. +func (s *DescribeInstallationMediaOutput) SetInstallationMedia(v []*InstallationMedia) *DescribeInstallationMediaOutput { + s.InstallationMedia = v + return s +} + +// SetMarker sets the Marker field's value. +func (s *DescribeInstallationMediaOutput) SetMarker(v string) *DescribeInstallationMediaOutput { + s.Marker = &v + return s +} + type DescribeOptionGroupOptionsInput struct { _ struct{} `type:"structure"` @@ -24263,7 +25472,7 @@ type DescribeOptionGroupOptionsInput struct { // The maximum number of records to include in the response. 
If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -24390,7 +25599,7 @@ type DescribeOptionGroupsInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -24533,7 +25742,7 @@ type DescribeOrderableDBInstanceOptionsInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -24686,7 +25895,7 @@ type DescribePendingMaintenanceActionsInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -24816,7 +26025,7 @@ type DescribeReservedDBInstancesInput struct { // The maximum number of records to include in the response. If more than the // MaxRecords value is available, a pagination token called a marker is included - // in the response so that the following results can be retrieved. + // in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -24965,7 +26174,7 @@ type DescribeReservedDBInstancesOfferingsInput struct { // The maximum number of records to include in the response. If more than the // MaxRecords value is available, a pagination token called a marker is included - // in the response so that the following results can be retrieved. + // in the response so that you can retrieve the reamaining results. // // Default: 100 // @@ -25164,7 +26373,7 @@ type DescribeSourceRegionsInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -26276,6 +27485,334 @@ func (s *IPRange) SetStatus(v string) *IPRange { return s } +type ImportInstallationMediaInput struct { + _ struct{} `type:"structure"` + + // The identifier of the custom Availability Zone (AZ) to import the installation + // media to. + // + // CustomAvailabilityZoneId is a required field + CustomAvailabilityZoneId *string `type:"string" required:"true"` + + // The name of the database engine to be used for this instance. + // + // The list only includes supported on-premises, bring your own media (BYOM) + // DB engines. 
+ // + // Valid Values: + // + // * sqlserver-ee + // + // * sqlserver-se + // + // * sqlserver-ex + // + // * sqlserver-web + // + // Engine is a required field + Engine *string `type:"string" required:"true"` + + // The path to the installation media for the specified DB engine. + // + // Example: SQLServerISO/en_sql_server_2016_enterprise_x64_dvd_8701793.iso + // + // EngineInstallationMediaPath is a required field + EngineInstallationMediaPath *string `type:"string" required:"true"` + + // The version number of the database engine to use. + // + // For a list of valid engine versions, call DescribeDBEngineVersions. + // + // The following are the database engines and links to information about the + // major and minor versions. The list only includes supported on-premises, bring + // your own media (BYOM) DB engines. + // + // Microsoft SQL Server + // + // See Version and Feature Support on Amazon RDS (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_SQLServer.html#SQLServer.Concepts.General.FeatureSupport) + // in the Amazon RDS User Guide. + // + // EngineVersion is a required field + EngineVersion *string `type:"string" required:"true"` + + // The path to the installation media for the operating system associated with + // the specified DB engine. + // + // Example: WindowsISO/en_windows_server_2016_x64_dvd_9327751.iso + // + // OSInstallationMediaPath is a required field + OSInstallationMediaPath *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ImportInstallationMediaInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportInstallationMediaInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ImportInstallationMediaInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ImportInstallationMediaInput"} + if s.CustomAvailabilityZoneId == nil { + invalidParams.Add(request.NewErrParamRequired("CustomAvailabilityZoneId")) + } + if s.Engine == nil { + invalidParams.Add(request.NewErrParamRequired("Engine")) + } + if s.EngineInstallationMediaPath == nil { + invalidParams.Add(request.NewErrParamRequired("EngineInstallationMediaPath")) + } + if s.EngineVersion == nil { + invalidParams.Add(request.NewErrParamRequired("EngineVersion")) + } + if s.OSInstallationMediaPath == nil { + invalidParams.Add(request.NewErrParamRequired("OSInstallationMediaPath")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCustomAvailabilityZoneId sets the CustomAvailabilityZoneId field's value. +func (s *ImportInstallationMediaInput) SetCustomAvailabilityZoneId(v string) *ImportInstallationMediaInput { + s.CustomAvailabilityZoneId = &v + return s +} + +// SetEngine sets the Engine field's value. +func (s *ImportInstallationMediaInput) SetEngine(v string) *ImportInstallationMediaInput { + s.Engine = &v + return s +} + +// SetEngineInstallationMediaPath sets the EngineInstallationMediaPath field's value. +func (s *ImportInstallationMediaInput) SetEngineInstallationMediaPath(v string) *ImportInstallationMediaInput { + s.EngineInstallationMediaPath = &v + return s +} + +// SetEngineVersion sets the EngineVersion field's value. +func (s *ImportInstallationMediaInput) SetEngineVersion(v string) *ImportInstallationMediaInput { + s.EngineVersion = &v + return s +} + +// SetOSInstallationMediaPath sets the OSInstallationMediaPath field's value. 
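A minimal sketch of driving ImportInstallationMediaInput end to end follows; it assumes the generated ImportInstallationMedia operation method exists on the RDS client in this revision. The custom AZ identifier and engine version are placeholders, and the media paths reuse the documented example values above:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rds"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := rds.New(sess)

	// All five fields are required; Validate() above rejects the request locally
	// if any of them is missing, before it is ever signed and sent.
	input := &rds.ImportInstallationMediaInput{
		CustomAvailabilityZoneId:    aws.String("example-custom-az-id"), // placeholder
		Engine:                      aws.String("sqlserver-ee"),
		EngineVersion:               aws.String("13.00.5292.0.v1"), // placeholder; see DescribeDBEngineVersions
		EngineInstallationMediaPath: aws.String("SQLServerISO/en_sql_server_2016_enterprise_x64_dvd_8701793.iso"),
		OSInstallationMediaPath:     aws.String("WindowsISO/en_windows_server_2016_x64_dvd_9327751.iso"),
	}

	out, err := svc.ImportInstallationMedia(input)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.InstallationMediaId), aws.StringValue(out.Status))
}
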
+func (s *ImportInstallationMediaInput) SetOSInstallationMediaPath(v string) *ImportInstallationMediaInput { + s.OSInstallationMediaPath = &v + return s +} + +// Contains the installation media for on-premises, bring your own media (BYOM) +// DB engines, such as Microsoft SQL Server. +type ImportInstallationMediaOutput struct { + _ struct{} `type:"structure"` + + // The custom Availability Zone (AZ) that contains the installation media. + CustomAvailabilityZoneId *string `type:"string"` + + // The DB engine. + Engine *string `type:"string"` + + // The path to the installation media for the DB engine. + EngineInstallationMediaPath *string `type:"string"` + + // The engine version of the DB engine. + EngineVersion *string `type:"string"` + + // If an installation media failure occurred, the cause of the failure. + FailureCause *InstallationMediaFailureCause `type:"structure"` + + // The installation media ID. + InstallationMediaId *string `type:"string"` + + // The path to the installation media for the operating system associated with + // the DB engine. + OSInstallationMediaPath *string `type:"string"` + + // The status of the installation media. + Status *string `type:"string"` +} + +// String returns the string representation +func (s ImportInstallationMediaOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportInstallationMediaOutput) GoString() string { + return s.String() +} + +// SetCustomAvailabilityZoneId sets the CustomAvailabilityZoneId field's value. +func (s *ImportInstallationMediaOutput) SetCustomAvailabilityZoneId(v string) *ImportInstallationMediaOutput { + s.CustomAvailabilityZoneId = &v + return s +} + +// SetEngine sets the Engine field's value. +func (s *ImportInstallationMediaOutput) SetEngine(v string) *ImportInstallationMediaOutput { + s.Engine = &v + return s +} + +// SetEngineInstallationMediaPath sets the EngineInstallationMediaPath field's value. +func (s *ImportInstallationMediaOutput) SetEngineInstallationMediaPath(v string) *ImportInstallationMediaOutput { + s.EngineInstallationMediaPath = &v + return s +} + +// SetEngineVersion sets the EngineVersion field's value. +func (s *ImportInstallationMediaOutput) SetEngineVersion(v string) *ImportInstallationMediaOutput { + s.EngineVersion = &v + return s +} + +// SetFailureCause sets the FailureCause field's value. +func (s *ImportInstallationMediaOutput) SetFailureCause(v *InstallationMediaFailureCause) *ImportInstallationMediaOutput { + s.FailureCause = v + return s +} + +// SetInstallationMediaId sets the InstallationMediaId field's value. +func (s *ImportInstallationMediaOutput) SetInstallationMediaId(v string) *ImportInstallationMediaOutput { + s.InstallationMediaId = &v + return s +} + +// SetOSInstallationMediaPath sets the OSInstallationMediaPath field's value. +func (s *ImportInstallationMediaOutput) SetOSInstallationMediaPath(v string) *ImportInstallationMediaOutput { + s.OSInstallationMediaPath = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *ImportInstallationMediaOutput) SetStatus(v string) *ImportInstallationMediaOutput { + s.Status = &v + return s +} + +// Contains the installation media for on-premises, bring your own media (BYOM) +// DB engines, such as Microsoft SQL Server. +type InstallationMedia struct { + _ struct{} `type:"structure"` + + // The custom Availability Zone (AZ) that contains the installation media. + CustomAvailabilityZoneId *string `type:"string"` + + // The DB engine. 
+ Engine *string `type:"string"` + + // The path to the installation media for the DB engine. + EngineInstallationMediaPath *string `type:"string"` + + // The engine version of the DB engine. + EngineVersion *string `type:"string"` + + // If an installation media failure occurred, the cause of the failure. + FailureCause *InstallationMediaFailureCause `type:"structure"` + + // The installation media ID. + InstallationMediaId *string `type:"string"` + + // The path to the installation media for the operating system associated with + // the DB engine. + OSInstallationMediaPath *string `type:"string"` + + // The status of the installation media. + Status *string `type:"string"` +} + +// String returns the string representation +func (s InstallationMedia) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstallationMedia) GoString() string { + return s.String() +} + +// SetCustomAvailabilityZoneId sets the CustomAvailabilityZoneId field's value. +func (s *InstallationMedia) SetCustomAvailabilityZoneId(v string) *InstallationMedia { + s.CustomAvailabilityZoneId = &v + return s +} + +// SetEngine sets the Engine field's value. +func (s *InstallationMedia) SetEngine(v string) *InstallationMedia { + s.Engine = &v + return s +} + +// SetEngineInstallationMediaPath sets the EngineInstallationMediaPath field's value. +func (s *InstallationMedia) SetEngineInstallationMediaPath(v string) *InstallationMedia { + s.EngineInstallationMediaPath = &v + return s +} + +// SetEngineVersion sets the EngineVersion field's value. +func (s *InstallationMedia) SetEngineVersion(v string) *InstallationMedia { + s.EngineVersion = &v + return s +} + +// SetFailureCause sets the FailureCause field's value. +func (s *InstallationMedia) SetFailureCause(v *InstallationMediaFailureCause) *InstallationMedia { + s.FailureCause = v + return s +} + +// SetInstallationMediaId sets the InstallationMediaId field's value. +func (s *InstallationMedia) SetInstallationMediaId(v string) *InstallationMedia { + s.InstallationMediaId = &v + return s +} + +// SetOSInstallationMediaPath sets the OSInstallationMediaPath field's value. +func (s *InstallationMedia) SetOSInstallationMediaPath(v string) *InstallationMedia { + s.OSInstallationMediaPath = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *InstallationMedia) SetStatus(v string) *InstallationMedia { + s.Status = &v + return s +} + +// Contains the cause of an installation media failure. Installation media is +// used for on-premises, bring your own media (BYOM) DB engines, such as Microsoft +// SQL Server. +type InstallationMediaFailureCause struct { + _ struct{} `type:"structure"` + + // The reason that an installation media import failed. + Message *string `type:"string"` +} + +// String returns the string representation +func (s InstallationMediaFailureCause) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstallationMediaFailureCause) GoString() string { + return s.String() +} + +// SetMessage sets the Message field's value. 
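The InstallationMedia and InstallationMediaFailureCause shapes above are what DescribeInstallationMedia pages through. A hedged sketch of the manual Marker/MaxRecords loop follows (the same pattern applies to DescribeCustomAvailabilityZones); it assumes the generated DescribeInstallationMedia operation method is available, and the custom AZ filter value is a placeholder:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rds"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := rds.New(sess)

	input := &rds.DescribeInstallationMediaInput{
		MaxRecords: aws.Int64(20), // optional page size
		Filters: []*rds.Filter{{
			Name:   aws.String("custom-availability-zone-id"),
			Values: []*string{aws.String("example-custom-az-id")}, // placeholder
		}},
	}

	for {
		page, err := svc.DescribeInstallationMedia(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, m := range page.InstallationMedia {
			fmt.Println(aws.StringValue(m.InstallationMediaId), aws.StringValue(m.Status))
			if m.FailureCause != nil {
				fmt.Println("  failure:", aws.StringValue(m.FailureCause.Message))
			}
		}
		// An empty Marker means the last page has been returned.
		if aws.StringValue(page.Marker) == "" {
			break
		}
		input.Marker = page.Marker
	}
}
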
+func (s *InstallationMediaFailureCause) SetMessage(v string) *InstallationMediaFailureCause { + s.Message = &v + return s +} + type ListTagsForResourceInput struct { _ struct{} `type:"structure"` @@ -35322,6 +36859,80 @@ func (s *VpcSecurityGroupMembership) SetVpcSecurityGroupId(v string) *VpcSecurit return s } +// Information about the virtual private network (VPN) between the VMware vSphere +// cluster and the AWS website. +// +// For more information about RDS on VMware, see the RDS on VMware User Guide. +// (https://docs.aws.amazon.com/AmazonRDS/latest/RDSonVMwareUserGuide/rds-on-vmware.html) +type VpnDetails struct { + _ struct{} `type:"structure"` + + // The IP address of network traffic from AWS to your on-premises data center. + VpnGatewayIp *string `type:"string"` + + // The ID of the VPN. + VpnId *string `type:"string"` + + // The name of the VPN. + VpnName *string `type:"string"` + + // The preshared key (PSK) for the VPN. + VpnPSK *string `type:"string" sensitive:"true"` + + // The state of the VPN. + VpnState *string `type:"string"` + + // The IP address of network traffic from your on-premises data center. A custom + // AZ receives the network traffic. + VpnTunnelOriginatorIP *string `type:"string"` +} + +// String returns the string representation +func (s VpnDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpnDetails) GoString() string { + return s.String() +} + +// SetVpnGatewayIp sets the VpnGatewayIp field's value. +func (s *VpnDetails) SetVpnGatewayIp(v string) *VpnDetails { + s.VpnGatewayIp = &v + return s +} + +// SetVpnId sets the VpnId field's value. +func (s *VpnDetails) SetVpnId(v string) *VpnDetails { + s.VpnId = &v + return s +} + +// SetVpnName sets the VpnName field's value. +func (s *VpnDetails) SetVpnName(v string) *VpnDetails { + s.VpnName = &v + return s +} + +// SetVpnPSK sets the VpnPSK field's value. +func (s *VpnDetails) SetVpnPSK(v string) *VpnDetails { + s.VpnPSK = &v + return s +} + +// SetVpnState sets the VpnState field's value. +func (s *VpnDetails) SetVpnState(v string) *VpnDetails { + s.VpnState = &v + return s +} + +// SetVpnTunnelOriginatorIP sets the VpnTunnelOriginatorIP field's value. +func (s *VpnDetails) SetVpnTunnelOriginatorIP(v string) *VpnDetails { + s.VpnTunnelOriginatorIP = &v + return s +} + const ( // ActivityStreamModeSync is a ActivityStreamMode enum value ActivityStreamModeSync = "sync" diff --git a/vendor/github.com/aws/aws-sdk-go/service/rds/doc.go b/vendor/github.com/aws/aws-sdk-go/service/rds/doc.go index bf5140b1477..6a263d8704d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/rds/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/rds/doc.go @@ -6,9 +6,9 @@ // // Amazon Relational Database Service (Amazon RDS) is a web service that makes // it easier to set up, operate, and scale a relational database in the cloud. -// It provides cost-efficient, resizable capacity for an industry-standard relational -// database and manages common database administration tasks, freeing up developers -// to focus on what makes their applications and businesses unique. +// It provides cost-efficient, resizeable capacity for an industry-standard +// relational database and manages common database administration tasks, freeing +// up developers to focus on what makes their applications and businesses unique. 
// // Amazon RDS gives you access to the capabilities of a MySQL, MariaDB, PostgreSQL, // Microsoft SQL Server, Oracle, or Amazon Aurora database server. These capabilities diff --git a/vendor/github.com/aws/aws-sdk-go/service/rds/errors.go b/vendor/github.com/aws/aws-sdk-go/service/rds/errors.go index 86ee24482b0..680626728f7 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/rds/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/rds/errors.go @@ -7,18 +7,18 @@ const ( // ErrCodeAuthorizationAlreadyExistsFault for service response error code // "AuthorizationAlreadyExists". // - // The specified CIDRIP or Amazon EC2 security group is already authorized for - // the specified DB security group. + // The specified CIDR IP range or Amazon EC2 security group is already authorized + // for the specified DB security group. ErrCodeAuthorizationAlreadyExistsFault = "AuthorizationAlreadyExists" // ErrCodeAuthorizationNotFoundFault for service response error code // "AuthorizationNotFound". // - // The specified CIDRIP or Amazon EC2 security group isn't authorized for the - // specified DB security group. + // The specified CIDR IP range or Amazon EC2 security group might not be authorized + // for the specified DB security group. // - // RDS also may not be authorized by using IAM to perform necessary actions - // on your behalf. + // Or, RDS might not be authorized to perform necessary actions using IAM on + // your behalf. ErrCodeAuthorizationNotFoundFault = "AuthorizationNotFound" // ErrCodeAuthorizationQuotaExceededFault for service response error code @@ -37,6 +37,26 @@ const ( // CertificateIdentifier doesn't refer to an existing certificate. ErrCodeCertificateNotFoundFault = "CertificateNotFound" + // ErrCodeCustomAvailabilityZoneAlreadyExistsFault for service response error code + // "CustomAvailabilityZoneAlreadyExists". + // + // CustomAvailabilityZoneName is already used by an existing custom Availability + // Zone. + ErrCodeCustomAvailabilityZoneAlreadyExistsFault = "CustomAvailabilityZoneAlreadyExists" + + // ErrCodeCustomAvailabilityZoneNotFoundFault for service response error code + // "CustomAvailabilityZoneNotFound". + // + // CustomAvailabilityZoneId doesn't refer to an existing custom Availability + // Zone identifier. + ErrCodeCustomAvailabilityZoneNotFoundFault = "CustomAvailabilityZoneNotFound" + + // ErrCodeCustomAvailabilityZoneQuotaExceededFault for service response error code + // "CustomAvailabilityZoneQuotaExceeded". + // + // You have exceeded the maximum number of custom Availability Zones. + ErrCodeCustomAvailabilityZoneQuotaExceededFault = "CustomAvailabilityZoneQuotaExceeded" + // ErrCodeDBClusterAlreadyExistsFault for service response error code // "DBClusterAlreadyExistsFault". // @@ -156,7 +176,7 @@ const ( // ErrCodeDBInstanceRoleNotFoundFault for service response error code // "DBInstanceRoleNotFound". // - // The specified RoleArn value doesn't match the specifed feature for the DB + // The specified RoleArn value doesn't match the specified feature for the DB // instance. ErrCodeDBInstanceRoleNotFoundFault = "DBInstanceRoleNotFound" @@ -300,6 +320,18 @@ const ( // "GlobalClusterQuotaExceededFault". ErrCodeGlobalClusterQuotaExceededFault = "GlobalClusterQuotaExceededFault" + // ErrCodeInstallationMediaAlreadyExistsFault for service response error code + // "InstallationMediaAlreadyExists". + // + // The specified installation media has already been imported. 
+ ErrCodeInstallationMediaAlreadyExistsFault = "InstallationMediaAlreadyExists" + + // ErrCodeInstallationMediaNotFoundFault for service response error code + // "InstallationMediaNotFound". + // + // InstallationMediaID doesn't refer to an existing installation media. + ErrCodeInstallationMediaNotFoundFault = "InstallationMediaNotFound" + // ErrCodeInstanceQuotaExceededFault for service response error code // "InstanceQuotaExceeded". // diff --git a/vendor/github.com/aws/aws-sdk-go/service/rds/service.go b/vendor/github.com/aws/aws-sdk-go/service/rds/service.go index f2d0efaf7d0..3ae523764c1 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/rds/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/rds/service.go @@ -46,11 +46,11 @@ const ( // svc := rds.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *RDS { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *RDS { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *RDS { svc := &RDS{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2014-10-31", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/redshift/service.go b/vendor/github.com/aws/aws-sdk-go/service/redshift/service.go index a750d141c62..fa2fea7a89f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/redshift/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/redshift/service.go @@ -46,11 +46,11 @@ const ( // svc := redshift.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Redshift { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
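The error codes added in errors.go above (ErrCodeCustomAvailabilityZone* and ErrCodeInstallationMedia*) surface through the usual awserr.Error interface. A brief sketch of matching on them follows; the installation media identifier is a placeholder, and the call assumes the DeleteInstallationMedia operation method introduced with these shapes:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rds"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := rds.New(sess)

	_, err := svc.DeleteInstallationMedia(&rds.DeleteInstallationMediaInput{
		InstallationMediaId: aws.String("example-media-id"), // placeholder
	})
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case rds.ErrCodeInstallationMediaNotFoundFault:
			// The identifier did not match any imported media; treat as already deleted.
			fmt.Println("installation media not found:", aerr.Message())
			return
		case rds.ErrCodeCustomAvailabilityZoneNotFoundFault:
			fmt.Println("custom AZ not found:", aerr.Message())
			return
		}
	}
	if err != nil {
		log.Fatal(err)
	}
}
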
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Redshift { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Redshift { svc := &Redshift{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2012-12-01", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/resourcegroups/service.go b/vendor/github.com/aws/aws-sdk-go/service/resourcegroups/service.go index 46a19ff2316..82f50ca25d7 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/resourcegroups/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/resourcegroups/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *ResourceGroups { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "resource-groups" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ResourceGroups { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ResourceGroups { svc := &ResourceGroups{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-11-27", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/route53/service.go b/vendor/github.com/aws/aws-sdk-go/service/route53/service.go index dd22cb2cd84..391c3e28615 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/route53/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/route53/service.go @@ -46,11 +46,11 @@ const ( // svc := route53.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Route53 { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Route53 { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Route53 { svc := &Route53{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2013-04-01", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/route53resolver/service.go b/vendor/github.com/aws/aws-sdk-go/service/route53resolver/service.go index 367e933ebb9..7b8dd4bf715 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/route53resolver/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/route53resolver/service.go @@ -46,11 +46,11 @@ const ( // svc := route53resolver.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Route53Resolver { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Route53Resolver { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Route53Resolver { svc := &Route53Resolver{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-04-01", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go index b4a4e8c4ad7..91bf5225ddf 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go @@ -22464,6 +22464,41 @@ func (s SSES3) GoString() string { return s.String() } +type ScanRange struct { + _ struct{} `type:"structure"` + + // Specifies the end of the byte range. This parameter is optional. Valid values: + // non-negative integers. The default value is one less than the size of the + // object being queried. + End *int64 `type:"long"` + + // Specifies the start of the byte range. This parameter is optional. Valid + // values: non-negative integers. The default value is 0. + Start *int64 `type:"long"` +} + +// String returns the string representation +func (s ScanRange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScanRange) GoString() string { + return s.String() +} + +// SetEnd sets the End field's value. +func (s *ScanRange) SetEnd(v int64) *ScanRange { + s.End = &v + return s +} + +// SetStart sets the Start field's value. +func (s *ScanRange) SetStart(v int64) *ScanRange { + s.Start = &v + return s +} + // SelectObjectContentEventStream provides handling of EventStreams for // the SelectObjectContent API. // @@ -22503,6 +22538,8 @@ type SelectObjectContentEventStream struct { // may result in resource leaks. 
func (es *SelectObjectContentEventStream) Close() (err error) { es.Reader.Close() + es.StreamCloser.Close() + return es.Err() } @@ -22512,8 +22549,6 @@ func (es *SelectObjectContentEventStream) Err() error { if err := es.Reader.Err(); err != nil { return err } - es.StreamCloser.Close() - return nil } @@ -22738,6 +22773,12 @@ type SelectObjectContentInput struct { // The SSE Customer Key MD5. For more information, see Server-Side Encryption // (Using Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Specifies the byte range of the object to get the records from. A record + // is processed when its first byte is contained by the range. This parameter + // is optional, but when specified, it must not be empty. See RFC 2616, Section + // 14.35.1 about how to specify the start and end of the range. + ScanRange *ScanRange `type:"structure"` } // String returns the string representation @@ -22858,6 +22899,12 @@ func (s *SelectObjectContentInput) SetSSECustomerKeyMD5(v string) *SelectObjectC return s } +// SetScanRange sets the ScanRange field's value. +func (s *SelectObjectContentInput) SetScanRange(v *ScanRange) *SelectObjectContentInput { + s.ScanRange = v + return s +} + type SelectObjectContentOutput struct { _ struct{} `type:"structure" payload:"Payload"` diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go index d17dcc9dadc..07e1297371b 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go @@ -46,11 +46,11 @@ const ( // svc := s3.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3 { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *S3 { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *S3 { svc := &S3{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2006-03-01", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3control/service.go b/vendor/github.com/aws/aws-sdk-go/service/s3control/service.go index 377c9d55d55..827741ce235 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3control/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3control/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3Control { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "s3" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
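On the S3 side, the new ScanRange shape and the ScanRange field on SelectObjectContentInput (added above) restrict a select to a byte range of the object; a record is processed only when its first byte falls inside the range. A hedged usage sketch follows, assuming the SDK's existing SelectObjectContent event-stream API; bucket, key, and the SQL expression are placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := s3.New(sess)

	resp, err := svc.SelectObjectContent(&s3.SelectObjectContentInput{
		Bucket:         aws.String("example-bucket"),   // placeholder
		Key:            aws.String("example-data.csv"), // placeholder
		Expression:     aws.String("SELECT * FROM S3Object s"),
		ExpressionType: aws.String(s3.ExpressionTypeSql),
		InputSerialization: &s3.InputSerialization{
			CSV: &s3.CSVInput{FileHeaderInfo: aws.String(s3.FileHeaderInfoUse)},
		},
		OutputSerialization: &s3.OutputSerialization{CSV: &s3.CSVOutput{}},
		// Limit the select to roughly the first megabyte of the object.
		ScanRange: &s3.ScanRange{
			Start: aws.Int64(0),
			End:   aws.Int64(1024 * 1024),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	// Close now also closes the underlying stream, per the Close change above.
	defer resp.EventStream.Close()

	for event := range resp.EventStream.Events() {
		if records, ok := event.(*s3.RecordsEvent); ok {
			fmt.Print(string(records.Payload))
		}
	}
	if err := resp.EventStream.Err(); err != nil {
		log.Fatal(err)
	}
}
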
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *S3Control { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *S3Control { svc := &S3Control{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-08-20", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/sagemaker/api.go b/vendor/github.com/aws/aws-sdk-go/service/sagemaker/api.go index ed54c0f3258..29e70ae97eb 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sagemaker/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sagemaker/api.go @@ -26538,6 +26538,15 @@ const ( // NotebookInstanceAcceleratorTypeMlEia1Xlarge is a NotebookInstanceAcceleratorType enum value NotebookInstanceAcceleratorTypeMlEia1Xlarge = "ml.eia1.xlarge" + + // NotebookInstanceAcceleratorTypeMlEia2Medium is a NotebookInstanceAcceleratorType enum value + NotebookInstanceAcceleratorTypeMlEia2Medium = "ml.eia2.medium" + + // NotebookInstanceAcceleratorTypeMlEia2Large is a NotebookInstanceAcceleratorType enum value + NotebookInstanceAcceleratorTypeMlEia2Large = "ml.eia2.large" + + // NotebookInstanceAcceleratorTypeMlEia2Xlarge is a NotebookInstanceAcceleratorType enum value + NotebookInstanceAcceleratorTypeMlEia2Xlarge = "ml.eia2.xlarge" ) const ( @@ -26666,6 +26675,15 @@ const ( // ProductionVariantAcceleratorTypeMlEia1Xlarge is a ProductionVariantAcceleratorType enum value ProductionVariantAcceleratorTypeMlEia1Xlarge = "ml.eia1.xlarge" + + // ProductionVariantAcceleratorTypeMlEia2Medium is a ProductionVariantAcceleratorType enum value + ProductionVariantAcceleratorTypeMlEia2Medium = "ml.eia2.medium" + + // ProductionVariantAcceleratorTypeMlEia2Large is a ProductionVariantAcceleratorType enum value + ProductionVariantAcceleratorTypeMlEia2Large = "ml.eia2.large" + + // ProductionVariantAcceleratorTypeMlEia2Xlarge is a ProductionVariantAcceleratorType enum value + ProductionVariantAcceleratorTypeMlEia2Xlarge = "ml.eia2.xlarge" ) const ( @@ -26714,6 +26732,24 @@ const ( // ProductionVariantInstanceTypeMlM524xlarge is a ProductionVariantInstanceType enum value ProductionVariantInstanceTypeMlM524xlarge = "ml.m5.24xlarge" + // ProductionVariantInstanceTypeMlM5dLarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlM5dLarge = "ml.m5d.large" + + // ProductionVariantInstanceTypeMlM5dXlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlM5dXlarge = "ml.m5d.xlarge" + + // ProductionVariantInstanceTypeMlM5d2xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlM5d2xlarge = "ml.m5d.2xlarge" + + // ProductionVariantInstanceTypeMlM5d4xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlM5d4xlarge = "ml.m5d.4xlarge" + + // ProductionVariantInstanceTypeMlM5d12xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlM5d12xlarge = "ml.m5d.12xlarge" + + // ProductionVariantInstanceTypeMlM5d24xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlM5d24xlarge = "ml.m5d.24xlarge" + // ProductionVariantInstanceTypeMlC4Large is a ProductionVariantInstanceType enum value ProductionVariantInstanceTypeMlC4Large = "ml.c4.large" @@ -26765,6 +26801,24 @@ const ( // 
ProductionVariantInstanceTypeMlC518xlarge is a ProductionVariantInstanceType enum value ProductionVariantInstanceTypeMlC518xlarge = "ml.c5.18xlarge" + // ProductionVariantInstanceTypeMlC5dLarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlC5dLarge = "ml.c5d.large" + + // ProductionVariantInstanceTypeMlC5dXlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlC5dXlarge = "ml.c5d.xlarge" + + // ProductionVariantInstanceTypeMlC5d2xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlC5d2xlarge = "ml.c5d.2xlarge" + + // ProductionVariantInstanceTypeMlC5d4xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlC5d4xlarge = "ml.c5d.4xlarge" + + // ProductionVariantInstanceTypeMlC5d9xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlC5d9xlarge = "ml.c5d.9xlarge" + + // ProductionVariantInstanceTypeMlC5d18xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlC5d18xlarge = "ml.c5d.18xlarge" + // ProductionVariantInstanceTypeMlG4dnXlarge is a ProductionVariantInstanceType enum value ProductionVariantInstanceTypeMlG4dnXlarge = "ml.g4dn.xlarge" @@ -26800,6 +26854,24 @@ const ( // ProductionVariantInstanceTypeMlR524xlarge is a ProductionVariantInstanceType enum value ProductionVariantInstanceTypeMlR524xlarge = "ml.r5.24xlarge" + + // ProductionVariantInstanceTypeMlR5dLarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlR5dLarge = "ml.r5d.large" + + // ProductionVariantInstanceTypeMlR5dXlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlR5dXlarge = "ml.r5d.xlarge" + + // ProductionVariantInstanceTypeMlR5d2xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlR5d2xlarge = "ml.r5d.2xlarge" + + // ProductionVariantInstanceTypeMlR5d4xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlR5d4xlarge = "ml.r5d.4xlarge" + + // ProductionVariantInstanceTypeMlR5d12xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlR5d12xlarge = "ml.r5d.12xlarge" + + // ProductionVariantInstanceTypeMlR5d24xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlR5d24xlarge = "ml.r5d.24xlarge" ) const ( diff --git a/vendor/github.com/aws/aws-sdk-go/service/sagemaker/service.go b/vendor/github.com/aws/aws-sdk-go/service/sagemaker/service.go index 7ae1df73414..ae95a939639 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sagemaker/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sagemaker/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *SageMaker { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "sagemaker" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
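The SageMaker changes above only add enum values for the new ml.m5d, ml.c5d, ml.r5d, and ml.eia2 families. A short, illustrative sketch of where they plug in follows; the model and endpoint-config names are placeholders, and regional availability of a given instance or accelerator type is not checked here:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sagemaker"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := sagemaker.New(sess)

	// Host an existing model on one of the newly added instance types, with an
	// EIA2 accelerator attached.
	_, err := svc.CreateEndpointConfig(&sagemaker.CreateEndpointConfigInput{
		EndpointConfigName: aws.String("example-endpoint-config"), // placeholder
		ProductionVariants: []*sagemaker.ProductionVariant{{
			VariantName:          aws.String("primary"),
			ModelName:            aws.String("example-model"), // placeholder
			InitialInstanceCount: aws.Int64(1),
			InstanceType:         aws.String(sagemaker.ProductionVariantInstanceTypeMlM5dLarge),
			AcceleratorType:      aws.String(sagemaker.ProductionVariantAcceleratorTypeMlEia2Medium),
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
}
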
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *SageMaker { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *SageMaker { svc := &SageMaker{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-07-24", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/service.go b/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/service.go index c4758e96dac..eeca2d9fd68 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *SecretsManager { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "secretsmanager" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *SecretsManager { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *SecretsManager { svc := &SecretsManager{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-10-17", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/securityhub/service.go b/vendor/github.com/aws/aws-sdk-go/service/securityhub/service.go index 113ce37f3e1..cdb48ddf0c8 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/securityhub/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/securityhub/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *SecurityHub { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "securityhub" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *SecurityHub { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *SecurityHub { svc := &SecurityHub{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-10-26", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/serverlessapplicationrepository/service.go b/vendor/github.com/aws/aws-sdk-go/service/serverlessapplicationrepository/service.go index 5702ab93313..7ceaec7a2fd 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/serverlessapplicationrepository/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/serverlessapplicationrepository/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *ServerlessApplicationRep if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "serverlessrepo" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ServerlessApplicationRepository { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ServerlessApplicationRepository { svc := &ServerlessApplicationRepository{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-09-08", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/servicecatalog/service.go b/vendor/github.com/aws/aws-sdk-go/service/servicecatalog/service.go index f15a5b8024d..718d9a1486d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/servicecatalog/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/servicecatalog/service.go @@ -46,11 +46,11 @@ const ( // svc := servicecatalog.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *ServiceCatalog { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ServiceCatalog { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ServiceCatalog { svc := &ServiceCatalog{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-12-10", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/servicediscovery/service.go b/vendor/github.com/aws/aws-sdk-go/service/servicediscovery/service.go index 3463e12c241..77d77772c90 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/servicediscovery/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/servicediscovery/service.go @@ -46,11 +46,11 @@ const ( // svc := servicediscovery.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *ServiceDiscovery { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ServiceDiscovery { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ServiceDiscovery { svc := &ServiceDiscovery{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-03-14", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/servicequotas/service.go b/vendor/github.com/aws/aws-sdk-go/service/servicequotas/service.go index 6404a922552..4c064181c98 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/servicequotas/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/servicequotas/service.go @@ -46,11 +46,11 @@ const ( // svc := servicequotas.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *ServiceQuotas { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ServiceQuotas { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ServiceQuotas { svc := &ServiceQuotas{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2019-06-24", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/ses/service.go b/vendor/github.com/aws/aws-sdk-go/service/ses/service.go index 0e33b771f53..09028e10453 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ses/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ses/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *SES { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "ses" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *SES { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *SES { svc := &SES{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2010-12-01", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/sfn/service.go b/vendor/github.com/aws/aws-sdk-go/service/sfn/service.go index 2436268f075..f21c6d9d82a 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sfn/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sfn/service.go @@ -46,11 +46,11 @@ const ( // svc := sfn.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *SFN { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *SFN { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *SFN { svc := &SFN{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2016-11-23", JSONVersion: "1.0", diff --git a/vendor/github.com/aws/aws-sdk-go/service/shield/service.go b/vendor/github.com/aws/aws-sdk-go/service/shield/service.go index b7a62ef92f6..499dea15df5 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/shield/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/shield/service.go @@ -46,11 +46,11 @@ const ( // svc := shield.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Shield { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Shield { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Shield { svc := &Shield{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2016-06-02", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/simpledb/service.go b/vendor/github.com/aws/aws-sdk-go/service/simpledb/service.go index d4de27413cb..75fd3d60108 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/simpledb/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/simpledb/service.go @@ -47,11 +47,11 @@ const ( // svc := simpledb.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *SimpleDB { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *SimpleDB { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *SimpleDB { svc := &SimpleDB{ Client: client.New( cfg, @@ -60,6 +60,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2009-04-15", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/sns/service.go b/vendor/github.com/aws/aws-sdk-go/service/sns/service.go index 96d7c8ba05c..aa8aff7d6e1 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sns/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sns/service.go @@ -46,11 +46,11 @@ const ( // svc := sns.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *SNS { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *SNS { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *SNS { svc := &SNS{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2010-03-31", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/sqs/service.go b/vendor/github.com/aws/aws-sdk-go/service/sqs/service.go index d463ecf0ddb..7bac89c4ae9 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sqs/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sqs/service.go @@ -46,11 +46,11 @@ const ( // svc := sqs.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *SQS { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *SQS { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *SQS { svc := &SQS{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2012-11-05", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssm/service.go b/vendor/github.com/aws/aws-sdk-go/service/ssm/service.go index 9a6b8f71c22..c66bfba90cc 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ssm/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ssm/service.go @@ -46,11 +46,11 @@ const ( // svc := ssm.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *SSM { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *SSM { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *SSM { svc := &SSM{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2014-11-06", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/storagegateway/service.go b/vendor/github.com/aws/aws-sdk-go/service/storagegateway/service.go index 9a0c08f6962..1e4f312be8c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/storagegateway/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/storagegateway/service.go @@ -46,11 +46,11 @@ const ( // svc := storagegateway.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *StorageGateway { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *StorageGateway { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *StorageGateway { svc := &StorageGateway{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2013-06-30", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go index eb0a6a417ef..9c5ed454536 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -676,9 +676,9 @@ func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessag // // Returned Error Codes: // * ErrCodeInvalidAuthorizationMessageException "InvalidAuthorizationMessageException" -// The error returned if the message passed to DecodeAuthorizationMessage was -// invalid. This can happen if the token contains invalid characters, such as -// linebreaks. +// This error is returned if the message passed to DecodeAuthorizationMessage +// was invalid. This can happen if the token contains invalid characters, such +// as linebreaks. // // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) { diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go index 41ea09c356c..a3e378edad3 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go @@ -34,9 +34,9 @@ const ( // ErrCodeInvalidAuthorizationMessageException for service response error code // "InvalidAuthorizationMessageException". // - // The error returned if the message passed to DecodeAuthorizationMessage was - // invalid. This can happen if the token contains invalid characters, such as - // linebreaks. + // This error is returned if the message passed to DecodeAuthorizationMessage + // was invalid. This can happen if the token contains invalid characters, such + // as linebreaks. ErrCodeInvalidAuthorizationMessageException = "InvalidAuthorizationMessageException" // ErrCodeInvalidIdentityTokenException for service response error code diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go index 185c914d1b3..2c3c3d2c1ed 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go @@ -46,11 +46,11 @@ const ( // svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *STS { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *STS { svc := &STS{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2011-06-15", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/swf/service.go b/vendor/github.com/aws/aws-sdk-go/service/swf/service.go index 014d89a5241..c30e411bd0f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/swf/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/swf/service.go @@ -46,11 +46,11 @@ const ( // svc := swf.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *SWF { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *SWF { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *SWF { svc := &SWF{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2012-01-25", JSONVersion: "1.0", diff --git a/vendor/github.com/aws/aws-sdk-go/service/transfer/api.go b/vendor/github.com/aws/aws-sdk-go/service/transfer/api.go index 575135be927..8088bea141f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/transfer/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/transfer/api.go @@ -368,6 +368,11 @@ func (c *Transfer) DeleteSshPublicKeyRequest(input *DeleteSshPublicKeyInput) (re // This exception is thrown when a resource is not found by the AWS Transfer // for SFTP service. // +// * ErrCodeThrottlingException "ThrottlingException" +// The request was denied due to request throttling. +// +// HTTP Status Code: 400 +// // See also, https://docs.aws.amazon.com/goto/WebAPI/transfer-2018-11-05/DeleteSshPublicKey func (c *Transfer) DeleteSshPublicKey(input *DeleteSshPublicKeyInput) (*DeleteSshPublicKeyOutput, error) { req, out := c.DeleteSshPublicKeyRequest(input) @@ -746,6 +751,11 @@ func (c *Transfer) ImportSshPublicKeyRequest(input *ImportSshPublicKeyInput) (re // This exception is thrown when a resource is not found by the AWS Transfer // for SFTP service. // +// * ErrCodeThrottlingException "ThrottlingException" +// The request was denied due to request throttling. +// +// HTTP Status Code: 400 +// // See also, https://docs.aws.amazon.com/goto/WebAPI/transfer-2018-11-05/ImportSshPublicKey func (c *Transfer) ImportSshPublicKey(input *ImportSshPublicKeyInput) (*ImportSshPublicKeyOutput, error) { req, out := c.ImportSshPublicKeyRequest(input) @@ -1286,6 +1296,11 @@ func (c *Transfer) StartServerRequest(input *StartServerInput) (req *request.Req // This exception is thrown when a resource is not found by the AWS Transfer // for SFTP service. 
// +// * ErrCodeThrottlingException "ThrottlingException" +// The request was denied due to request throttling. +// +// HTTP Status Code: 400 +// // See also, https://docs.aws.amazon.com/goto/WebAPI/transfer-2018-11-05/StartServer func (c *Transfer) StartServer(input *StartServerInput) (*StartServerOutput, error) { req, out := c.StartServerRequest(input) @@ -1387,6 +1402,11 @@ func (c *Transfer) StopServerRequest(input *StopServerInput) (req *request.Reque // This exception is thrown when a resource is not found by the AWS Transfer // for SFTP service. // +// * ErrCodeThrottlingException "ThrottlingException" +// The request was denied due to request throttling. +// +// HTTP Status Code: 400 +// // See also, https://docs.aws.amazon.com/goto/WebAPI/transfer-2018-11-05/StopServer func (c *Transfer) StopServer(input *StopServerInput) (*StopServerOutput, error) { req, out := c.StopServerRequest(input) @@ -1477,6 +1497,10 @@ func (c *Transfer) TagResourceRequest(input *TagResourceInput) (req *request.Req // * ErrCodeInvalidRequestException "InvalidRequestException" // This exception is thrown when the client submits a malformed request. // +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// This exception is thrown when a resource is not found by the AWS Transfer +// for SFTP service. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/transfer-2018-11-05/TagResource func (c *Transfer) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { req, out := c.TagResourceRequest(input) @@ -1661,6 +1685,10 @@ func (c *Transfer) UntagResourceRequest(input *UntagResourceInput) (req *request // * ErrCodeInvalidRequestException "InvalidRequestException" // This exception is thrown when the client submits a malformed request. // +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// This exception is thrown when a resource is not found by the AWS Transfer +// for SFTP service. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/transfer-2018-11-05/UntagResource func (c *Transfer) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { req, out := c.UntagResourceRequest(input) @@ -1750,10 +1778,18 @@ func (c *Transfer) UpdateServerRequest(input *UpdateServerInput) (req *request.R // * ErrCodeInvalidRequestException "InvalidRequestException" // This exception is thrown when the client submits a malformed request. // +// * ErrCodeResourceExistsException "ResourceExistsException" +// The requested resource does not exist. +// // * ErrCodeResourceNotFoundException "ResourceNotFoundException" // This exception is thrown when a resource is not found by the AWS Transfer // for SFTP service. // +// * ErrCodeThrottlingException "ThrottlingException" +// The request was denied due to request throttling. +// +// HTTP Status Code: 400 +// // See also, https://docs.aws.amazon.com/goto/WebAPI/transfer-2018-11-05/UpdateServer func (c *Transfer) UpdateServer(input *UpdateServerInput) (*UpdateServerOutput, error) { req, out := c.UpdateServerRequest(input) @@ -1848,6 +1884,11 @@ func (c *Transfer) UpdateUserRequest(input *UpdateUserInput) (req *request.Reque // This exception is thrown when a resource is not found by the AWS Transfer // for SFTP service. // +// * ErrCodeThrottlingException "ThrottlingException" +// The request was denied due to request throttling. 
+// +// HTTP Status Code: 400 +// // See also, https://docs.aws.amazon.com/goto/WebAPI/transfer-2018-11-05/UpdateUser func (c *Transfer) UpdateUser(input *UpdateUserInput) (*UpdateUserOutput, error) { req, out := c.UpdateUserRequest(input) @@ -1890,7 +1931,7 @@ type CreateServerInput struct { // to a new AWS SFTP server, don't update the host key. Accidentally changing // a server's host key can be disruptive. // - // For more information, see "https://docs.aws.amazon.com/transfer/latest/userguide/change-host-key" + // For more information, see "https://alpha-docs-aws.amazon.com/transfer/latest/userguide/configuring-servers.html#change-host-key" // in the AWS SFTP User Guide. HostKey *string `type:"string" sensitive:"true"` @@ -1910,7 +1951,7 @@ type CreateServerInput struct { // A value that allows the service to write your SFTP users' activity to your // Amazon CloudWatch logs for monitoring and auditing purposes. - LoggingRole *string `type:"string"` + LoggingRole *string `min:"20" type:"string"` // Key-value pairs that can be used to group and search for servers. Tags []*Tag `min:"1" type:"list"` @@ -1929,9 +1970,22 @@ func (s CreateServerInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *CreateServerInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "CreateServerInput"} + if s.LoggingRole != nil && len(*s.LoggingRole) < 20 { + invalidParams.Add(request.NewErrParamMinLen("LoggingRole", 20)) + } if s.Tags != nil && len(s.Tags) < 1 { invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) } + if s.EndpointDetails != nil { + if err := s.EndpointDetails.Validate(); err != nil { + invalidParams.AddNested("EndpointDetails", err.(request.ErrInvalidParams)) + } + } + if s.IdentityProviderDetails != nil { + if err := s.IdentityProviderDetails.Validate(); err != nil { + invalidParams.AddNested("IdentityProviderDetails", err.(request.ErrInvalidParams)) + } + } if s.Tags != nil { for i, v := range s.Tags { if v == nil { @@ -1997,7 +2051,7 @@ type CreateServerOutput struct { // The service-assigned ID of the SFTP server that is created. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` } // String returns the string representation @@ -2020,9 +2074,33 @@ type CreateUserInput struct { _ struct{} `type:"structure"` // The landing directory (folder) for a user when they log in to the server - // using their SFTP client. An example is /home/username . + // using their SFTP client. + // + // An example is /home/username. HomeDirectory *string `type:"string"` + // Logical directory mappings that specify what S3 paths and keys should be + // visible to your user and how you want to make them visible. You will need + // to specify the "Entry" and "Target" pair, where Entry shows how the path + // is made visible and Target is the actual S3 path. If you only specify a target, + // it will be displayed as is. You will need to also make sure that your AWS + // IAM Role provides access to paths in Target. The following is an example. + // + // '[ "/bucket2/documentation", { "Entry": "your-personal-report.pdf", "Target": + // "/bucket3/customized-reports/${transfer:UserName}.pdf" } ]' + // + // In most cases, you can use this value instead of the scope down policy to + // lock your user down to the designated home directory ("chroot"). To do this, + // you can set Entry to '/' and set Target to the HomeDirectory parameter value. 
+ HomeDirectoryMappings []*HomeDirectoryMapEntry `min:"1" type:"list"` + + // The type of landing directory (folder) you want your users' home directory + // to be when they log into the SFTP server. If you set it to PATH, the user + // will see the absolute Amazon S3 bucket paths as is in their SFTP clients. + // If you set it LOGICAL, you will need to provide mappings in the HomeDirectoryMappings + // for how you want to make S3 paths visible to your user. + HomeDirectoryType *string `type:"string" enum:"HomeDirectoryType"` + // A scope-down policy for your user so you can use the same IAM role across // multiple users. This policy scopes down user access to portions of their // Amazon S3 bucket. Variables that you can use inside this policy include ${Transfer:UserName}, @@ -2047,13 +2125,13 @@ type CreateUserInput struct { // SFTP user's transfer requests. // // Role is a required field - Role *string `type:"string" required:"true"` + Role *string `min:"20" type:"string" required:"true"` // A system-assigned unique identifier for an SFTP server instance. This is // the specific SFTP server that you added your user to. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` // The public portion of the Secure Shell (SSH) key used to authenticate the // user to the SFTP server. @@ -2069,7 +2147,7 @@ type CreateUserInput struct { // underscore, and hyphen. The user name can't start with a hyphen. // // UserName is a required field - UserName *string `type:"string" required:"true"` + UserName *string `min:"3" type:"string" required:"true"` } // String returns the string representation @@ -2085,18 +2163,40 @@ func (s CreateUserInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *CreateUserInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "CreateUserInput"} + if s.HomeDirectoryMappings != nil && len(s.HomeDirectoryMappings) < 1 { + invalidParams.Add(request.NewErrParamMinLen("HomeDirectoryMappings", 1)) + } if s.Role == nil { invalidParams.Add(request.NewErrParamRequired("Role")) } + if s.Role != nil && len(*s.Role) < 20 { + invalidParams.Add(request.NewErrParamMinLen("Role", 20)) + } if s.ServerId == nil { invalidParams.Add(request.NewErrParamRequired("ServerId")) } + if s.ServerId != nil && len(*s.ServerId) < 19 { + invalidParams.Add(request.NewErrParamMinLen("ServerId", 19)) + } if s.Tags != nil && len(s.Tags) < 1 { invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) } if s.UserName == nil { invalidParams.Add(request.NewErrParamRequired("UserName")) } + if s.UserName != nil && len(*s.UserName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 3)) + } + if s.HomeDirectoryMappings != nil { + for i, v := range s.HomeDirectoryMappings { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "HomeDirectoryMappings", i), err.(request.ErrInvalidParams)) + } + } + } if s.Tags != nil { for i, v := range s.Tags { if v == nil { @@ -2120,6 +2220,18 @@ func (s *CreateUserInput) SetHomeDirectory(v string) *CreateUserInput { return s } +// SetHomeDirectoryMappings sets the HomeDirectoryMappings field's value. +func (s *CreateUserInput) SetHomeDirectoryMappings(v []*HomeDirectoryMapEntry) *CreateUserInput { + s.HomeDirectoryMappings = v + return s +} + +// SetHomeDirectoryType sets the HomeDirectoryType field's value. 
+func (s *CreateUserInput) SetHomeDirectoryType(v string) *CreateUserInput { + s.HomeDirectoryType = &v + return s +} + // SetPolicy sets the Policy field's value. func (s *CreateUserInput) SetPolicy(v string) *CreateUserInput { s.Policy = &v @@ -2162,12 +2274,12 @@ type CreateUserOutput struct { // The ID of the SFTP server that the user is attached to. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` // A unique string that identifies a user account associated with an SFTP server. // // UserName is a required field - UserName *string `type:"string" required:"true"` + UserName *string `min:"3" type:"string" required:"true"` } // String returns the string representation @@ -2198,7 +2310,7 @@ type DeleteServerInput struct { // A unique system-assigned identifier for an SFTP server instance. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` } // String returns the string representation @@ -2217,6 +2329,9 @@ func (s *DeleteServerInput) Validate() error { if s.ServerId == nil { invalidParams.Add(request.NewErrParamRequired("ServerId")) } + if s.ServerId != nil && len(*s.ServerId) < 19 { + invalidParams.Add(request.NewErrParamMinLen("ServerId", 19)) + } if invalidParams.Len() > 0 { return invalidParams @@ -2251,17 +2366,17 @@ type DeleteSshPublicKeyInput struct { // server instance that has the user assigned to it. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` // A unique identifier used to reference your user's specific SSH key. // // SshPublicKeyId is a required field - SshPublicKeyId *string `type:"string" required:"true"` + SshPublicKeyId *string `min:"21" type:"string" required:"true"` // A unique string that identifies a user whose public key is being deleted. // // UserName is a required field - UserName *string `type:"string" required:"true"` + UserName *string `min:"3" type:"string" required:"true"` } // String returns the string representation @@ -2280,12 +2395,21 @@ func (s *DeleteSshPublicKeyInput) Validate() error { if s.ServerId == nil { invalidParams.Add(request.NewErrParamRequired("ServerId")) } + if s.ServerId != nil && len(*s.ServerId) < 19 { + invalidParams.Add(request.NewErrParamMinLen("ServerId", 19)) + } if s.SshPublicKeyId == nil { invalidParams.Add(request.NewErrParamRequired("SshPublicKeyId")) } + if s.SshPublicKeyId != nil && len(*s.SshPublicKeyId) < 21 { + invalidParams.Add(request.NewErrParamMinLen("SshPublicKeyId", 21)) + } if s.UserName == nil { invalidParams.Add(request.NewErrParamRequired("UserName")) } + if s.UserName != nil && len(*s.UserName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 3)) + } if invalidParams.Len() > 0 { return invalidParams @@ -2332,12 +2456,12 @@ type DeleteUserInput struct { // the user assigned to it. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` // A unique string that identifies a user that is being deleted from the server. 
// // UserName is a required field - UserName *string `type:"string" required:"true"` + UserName *string `min:"3" type:"string" required:"true"` } // String returns the string representation @@ -2356,9 +2480,15 @@ func (s *DeleteUserInput) Validate() error { if s.ServerId == nil { invalidParams.Add(request.NewErrParamRequired("ServerId")) } + if s.ServerId != nil && len(*s.ServerId) < 19 { + invalidParams.Add(request.NewErrParamMinLen("ServerId", 19)) + } if s.UserName == nil { invalidParams.Add(request.NewErrParamRequired("UserName")) } + if s.UserName != nil && len(*s.UserName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 3)) + } if invalidParams.Len() > 0 { return invalidParams @@ -2398,7 +2528,7 @@ type DescribeServerInput struct { // A system-assigned unique identifier for an SFTP server. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` } // String returns the string representation @@ -2417,6 +2547,9 @@ func (s *DescribeServerInput) Validate() error { if s.ServerId == nil { invalidParams.Add(request.NewErrParamRequired("ServerId")) } + if s.ServerId != nil && len(*s.ServerId) < 19 { + invalidParams.Add(request.NewErrParamMinLen("ServerId", 19)) + } if invalidParams.Len() > 0 { return invalidParams @@ -2462,14 +2595,14 @@ type DescribeUserInput struct { // assigned. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` // The name of the user assigned to one or more servers. User names are part // of the sign-in credentials to use the AWS Transfer for SFTP service and perform // file transfer tasks. // // UserName is a required field - UserName *string `type:"string" required:"true"` + UserName *string `min:"3" type:"string" required:"true"` } // String returns the string representation @@ -2488,9 +2621,15 @@ func (s *DescribeUserInput) Validate() error { if s.ServerId == nil { invalidParams.Add(request.NewErrParamRequired("ServerId")) } + if s.ServerId != nil && len(*s.ServerId) < 19 { + invalidParams.Add(request.NewErrParamMinLen("ServerId", 19)) + } if s.UserName == nil { invalidParams.Add(request.NewErrParamRequired("UserName")) } + if s.UserName != nil && len(*s.UserName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 3)) + } if invalidParams.Len() > 0 { return invalidParams @@ -2517,7 +2656,7 @@ type DescribeUserOutput struct { // assigned. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` // An array containing the properties of the user account for the ServerID value // that you specified. @@ -2588,11 +2727,11 @@ type DescribedServer struct { // This property is an AWS Identity and Access Management (IAM) entity that // allows the server to turn on Amazon CloudWatch logging for Amazon S3 events. // When set, user activity can be viewed in your CloudWatch logs. - LoggingRole *string `type:"string"` + LoggingRole *string `min:"20" type:"string"` // This property is a unique system-assigned identifier for the SFTP server // that you instantiate. - ServerId *string `type:"string"` + ServerId *string `min:"19" type:"string"` // The condition of the SFTP server for the server that was described. A value // of ONLINE indicates that the server can accept jobs and transfer files. 
A @@ -2701,9 +2840,32 @@ type DescribedUser struct { // This property specifies the landing directory (or folder), which is the location // that files are written to or read from in an Amazon S3 bucket for the described - // user. An example is /bucket_name/home/username . + // user. An example is /your s3 bucket name/home/username . HomeDirectory *string `type:"string"` + // Logical directory mappings that you specified for what S3 paths and keys + // should be visible to your user and how you want to make them visible. You + // will need to specify the "Entry" and "Target" pair, where Entry shows how + // the path is made visible and Target is the actual S3 path. If you only specify + // a target, it will be displayed as is. You will need to also make sure that + // your AWS IAM Role provides access to paths in Target. + // + // In most cases, you can use this value instead of the scope down policy to + // lock your user down to the designated home directory ("chroot"). To do this, + // you can set Entry to '/' and set Target to the HomeDirectory parameter value. + // + // In most cases, you can use this value instead of the scope down policy to + // lock your user down to the designated home directory ("chroot"). To do this, + // you can set Entry to '/' and set Target to the HomeDirectory parameter value. + HomeDirectoryMappings []*HomeDirectoryMapEntry `min:"1" type:"list"` + + // The type of landing directory (folder) you mapped for your users' to see + // when they log into the SFTP server. If you set it to PATH, the user will + // see the absolute Amazon S3 bucket paths as is in their SFTP clients. If you + // set it LOGICAL, you will need to provide mappings in the HomeDirectoryMappings + // for how you want to make S3 paths visible to your user. + HomeDirectoryType *string `type:"string" enum:"HomeDirectoryType"` + // Specifies the name of the policy in use for the described user. Policy *string `type:"string"` @@ -2713,7 +2875,7 @@ type DescribedUser struct { // into and out of your Amazon S3 bucket or buckets. The IAM role should also // contain a trust relationship that allows the SFTP server to access your resources // when servicing your SFTP user's transfer requests. - Role *string `type:"string"` + Role *string `min:"20" type:"string"` // This property contains the public key portion of the Secure Shell (SSH) keys // stored for the described user. @@ -2726,7 +2888,7 @@ type DescribedUser struct { // This property is the name of the user that was requested to be described. // User names are used for authentication purposes. This is the string that // will be used by your user when they log in to your SFTP server. - UserName *string `type:"string"` + UserName *string `min:"3" type:"string"` } // String returns the string representation @@ -2751,6 +2913,18 @@ func (s *DescribedUser) SetHomeDirectory(v string) *DescribedUser { return s } +// SetHomeDirectoryMappings sets the HomeDirectoryMappings field's value. +func (s *DescribedUser) SetHomeDirectoryMappings(v []*HomeDirectoryMapEntry) *DescribedUser { + s.HomeDirectoryMappings = v + return s +} + +// SetHomeDirectoryType sets the HomeDirectoryType field's value. +func (s *DescribedUser) SetHomeDirectoryType(v string) *DescribedUser { + s.HomeDirectoryType = &v + return s +} + // SetPolicy sets the Policy field's value. func (s *DescribedUser) SetPolicy(v string) *DescribedUser { s.Policy = &v @@ -2787,7 +2961,7 @@ type EndpointDetails struct { _ struct{} `type:"structure"` // The ID of the VPC endpoint. 
- VpcEndpointId *string `type:"string"` + VpcEndpointId *string `min:"22" type:"string"` } // String returns the string representation @@ -2800,12 +2974,78 @@ func (s EndpointDetails) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *EndpointDetails) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EndpointDetails"} + if s.VpcEndpointId != nil && len(*s.VpcEndpointId) < 22 { + invalidParams.Add(request.NewErrParamMinLen("VpcEndpointId", 22)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetVpcEndpointId sets the VpcEndpointId field's value. func (s *EndpointDetails) SetVpcEndpointId(v string) *EndpointDetails { s.VpcEndpointId = &v return s } +// Represents an object that contains entries and a targets for HomeDirectoryMappings. +type HomeDirectoryMapEntry struct { + _ struct{} `type:"structure"` + + // Represents an entry and a target for HomeDirectoryMappings. + // + // Entry is a required field + Entry *string `type:"string" required:"true"` + + // Represents the map target that is used in a HomeDirectorymapEntry. + // + // Target is a required field + Target *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s HomeDirectoryMapEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HomeDirectoryMapEntry) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *HomeDirectoryMapEntry) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "HomeDirectoryMapEntry"} + if s.Entry == nil { + invalidParams.Add(request.NewErrParamRequired("Entry")) + } + if s.Target == nil { + invalidParams.Add(request.NewErrParamRequired("Target")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEntry sets the Entry field's value. +func (s *HomeDirectoryMapEntry) SetEntry(v string) *HomeDirectoryMapEntry { + s.Entry = &v + return s +} + +// SetTarget sets the Target field's value. +func (s *HomeDirectoryMapEntry) SetTarget(v string) *HomeDirectoryMapEntry { + s.Target = &v + return s +} + // Returns information related to the type of user authentication that is in // use for a server's users. A server can have only one method of authentication. type IdentityProviderDetails struct { @@ -2813,7 +3053,7 @@ type IdentityProviderDetails struct { // The InvocationRole parameter provides the type of InvocationRole used to // authenticate the user account. - InvocationRole *string `type:"string"` + InvocationRole *string `min:"20" type:"string"` // The Url parameter provides contains the location of the service endpoint // used to authenticate users. @@ -2830,6 +3070,19 @@ func (s IdentityProviderDetails) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *IdentityProviderDetails) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IdentityProviderDetails"} + if s.InvocationRole != nil && len(*s.InvocationRole) < 20 { + invalidParams.Add(request.NewErrParamMinLen("InvocationRole", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetInvocationRole sets the InvocationRole field's value. 
func (s *IdentityProviderDetails) SetInvocationRole(v string) *IdentityProviderDetails { s.InvocationRole = &v @@ -2848,7 +3101,7 @@ type ImportSshPublicKeyInput struct { // A system-assigned unique identifier for an SFTP server. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` // The public key portion of an SSH key pair. // @@ -2858,7 +3111,7 @@ type ImportSshPublicKeyInput struct { // The name of the user account that is assigned to one or more servers. // // UserName is a required field - UserName *string `type:"string" required:"true"` + UserName *string `min:"3" type:"string" required:"true"` } // String returns the string representation @@ -2877,12 +3130,18 @@ func (s *ImportSshPublicKeyInput) Validate() error { if s.ServerId == nil { invalidParams.Add(request.NewErrParamRequired("ServerId")) } + if s.ServerId != nil && len(*s.ServerId) < 19 { + invalidParams.Add(request.NewErrParamMinLen("ServerId", 19)) + } if s.SshPublicKeyBody == nil { invalidParams.Add(request.NewErrParamRequired("SshPublicKeyBody")) } if s.UserName == nil { invalidParams.Add(request.NewErrParamRequired("UserName")) } + if s.UserName != nil && len(*s.UserName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 3)) + } if invalidParams.Len() > 0 { return invalidParams @@ -2917,18 +3176,18 @@ type ImportSshPublicKeyOutput struct { // A system-assigned unique identifier for an SFTP server. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` // This identifier is the name given to a public key by the system that was // imported. // // SshPublicKeyId is a required field - SshPublicKeyId *string `type:"string" required:"true"` + SshPublicKeyId *string `min:"21" type:"string" required:"true"` // A user name assigned to the ServerID value that you specified. // // UserName is a required field - UserName *string `type:"string" required:"true"` + UserName *string `min:"3" type:"string" required:"true"` } // String returns the string representation @@ -3175,7 +3434,7 @@ type ListUsersInput struct { // server that has users assigned to it. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` } // String returns the string representation @@ -3200,6 +3459,9 @@ func (s *ListUsersInput) Validate() error { if s.ServerId == nil { invalidParams.Add(request.NewErrParamRequired("ServerId")) } + if s.ServerId != nil && len(*s.ServerId) < 19 { + invalidParams.Add(request.NewErrParamMinLen("ServerId", 19)) + } if invalidParams.Len() > 0 { return invalidParams @@ -3237,7 +3499,7 @@ type ListUsersOutput struct { // assigned to. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` // Returns the user accounts and their properties for the ServerId value that // you specify. @@ -3296,11 +3558,11 @@ type ListedServer struct { // The AWS Identity and Access Management entity that allows the server to turn // on Amazon CloudWatch logging. - LoggingRole *string `type:"string"` + LoggingRole *string `min:"20" type:"string"` // This value is the unique system assigned identifier for the SFTP servers // that were listed. 
- ServerId *string `type:"string"` + ServerId *string `min:"19" type:"string"` // This property describes the condition of the SFTP server for the server that // was described. A value of ONLINE> indicates that the server can accept jobs @@ -3383,18 +3645,25 @@ type ListedUser struct { // an Amazon S3 bucket for the user you specify by their ARN. HomeDirectory *string `type:"string"` + // The type of landing directory (folder) you mapped for your users' home directory. + // If you set it to PATH, the user will see the absolute Amazon S3 bucket paths + // as is in their SFTP clients. If you set it LOGICAL, you will need to provide + // mappings in the HomeDirectoryMappings for how you want to make S3 paths visible + // to your user. + HomeDirectoryType *string `type:"string" enum:"HomeDirectoryType"` + // The role in use by this user. A role is an AWS Identity and Access Management // (IAM) entity that, in this case, allows the SFTP server to act on a user's // behalf. It allows the server to inherit the trust relationship that enables // that user to perform file operations to their Amazon S3 bucket. - Role *string `type:"string"` + Role *string `min:"20" type:"string"` // This value is the number of SSH public keys stored for the user you specified. SshPublicKeyCount *int64 `type:"integer"` // The name of the user whose ARN was specified. User names are used for authentication // purposes. - UserName *string `type:"string"` + UserName *string `min:"3" type:"string"` } // String returns the string representation @@ -3419,6 +3688,12 @@ func (s *ListedUser) SetHomeDirectory(v string) *ListedUser { return s } +// SetHomeDirectoryType sets the HomeDirectoryType field's value. +func (s *ListedUser) SetHomeDirectoryType(v string) *ListedUser { + s.HomeDirectoryType = &v + return s +} + // SetRole sets the Role field's value. func (s *ListedUser) SetRole(v string) *ListedUser { s.Role = &v @@ -3458,7 +3733,7 @@ type SshPublicKey struct { // The SshPublicKeyId parameter contains the identifier of the public key. // // SshPublicKeyId is a required field - SshPublicKeyId *string `type:"string" required:"true"` + SshPublicKeyId *string `min:"21" type:"string" required:"true"` } // String returns the string representation @@ -3495,7 +3770,7 @@ type StartServerInput struct { // A system-assigned unique identifier for an SFTP server that you start. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` } // String returns the string representation @@ -3514,6 +3789,9 @@ func (s *StartServerInput) Validate() error { if s.ServerId == nil { invalidParams.Add(request.NewErrParamRequired("ServerId")) } + if s.ServerId != nil && len(*s.ServerId) < 19 { + invalidParams.Add(request.NewErrParamMinLen("ServerId", 19)) + } if invalidParams.Len() > 0 { return invalidParams @@ -3547,7 +3825,7 @@ type StopServerInput struct { // A system-assigned unique identifier for an SFTP server that you stopped. 
// // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` } // String returns the string representation @@ -3566,6 +3844,9 @@ func (s *StopServerInput) Validate() error { if s.ServerId == nil { invalidParams.Add(request.NewErrParamRequired("ServerId")) } + if s.ServerId != nil && len(*s.ServerId) < 19 { + invalidParams.Add(request.NewErrParamMinLen("ServerId", 19)) + } if invalidParams.Len() > 0 { return invalidParams @@ -3744,12 +4025,12 @@ type TestIdentityProviderInput struct { // method is tested with a user name and password. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` // This request parameter is the name of the user account to be tested. // // UserName is a required field - UserName *string `type:"string" required:"true"` + UserName *string `min:"3" type:"string" required:"true"` // The password of the user account to be tested. UserPassword *string `type:"string" sensitive:"true"` @@ -3771,9 +4052,15 @@ func (s *TestIdentityProviderInput) Validate() error { if s.ServerId == nil { invalidParams.Add(request.NewErrParamRequired("ServerId")) } + if s.ServerId != nil && len(*s.ServerId) < 19 { + invalidParams.Add(request.NewErrParamMinLen("ServerId", 19)) + } if s.UserName == nil { invalidParams.Add(request.NewErrParamRequired("UserName")) } + if s.UserName != nil && len(*s.UserName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 3)) + } if invalidParams.Len() > 0 { return invalidParams @@ -3966,7 +4253,7 @@ type UpdateServerInput struct { // user account is assigned to. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` } // String returns the string representation @@ -3985,6 +4272,19 @@ func (s *UpdateServerInput) Validate() error { if s.ServerId == nil { invalidParams.Add(request.NewErrParamRequired("ServerId")) } + if s.ServerId != nil && len(*s.ServerId) < 19 { + invalidParams.Add(request.NewErrParamMinLen("ServerId", 19)) + } + if s.EndpointDetails != nil { + if err := s.EndpointDetails.Validate(); err != nil { + invalidParams.AddNested("EndpointDetails", err.(request.ErrInvalidParams)) + } + } + if s.IdentityProviderDetails != nil { + if err := s.IdentityProviderDetails.Validate(); err != nil { + invalidParams.AddNested("IdentityProviderDetails", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -4035,7 +4335,7 @@ type UpdateServerOutput struct { // is assigned to. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` } // String returns the string representation @@ -4058,9 +4358,33 @@ type UpdateUserInput struct { _ struct{} `type:"structure"` // A parameter that specifies the landing directory (folder) for a user when - // they log in to the server using their client. An example is /home/username . + // they log in to the server using their client. + // + // An example is /home/username. HomeDirectory *string `type:"string"` + // Logical directory mappings that specify what S3 paths and keys should be + // visible to your user and how you want to make them visible. You will need + // to specify the "Entry" and "Target" pair, where Entry shows how the path + // is made visible and Target is the actual S3 path. 
If you only specify a target, + // it will be displayed as is. You will need to also make sure that your AWS + // IAM Role provides access to paths in Target. The following is an example. + // + // '[ "/bucket2/documentation", { "Entry": "your-personal-report.pdf", "Target": + // "/bucket3/customized-reports/${transfer:UserName}.pdf" } ]' + // + // In most cases, you can use this value instead of the scope down policy to + // lock your user down to the designated home directory ("chroot"). To do this, + // you can set Entry to '/' and set Target to the HomeDirectory parameter value. + HomeDirectoryMappings []*HomeDirectoryMapEntry `min:"1" type:"list"` + + // The type of landing directory (folder) you want your users' home directory + // to be when they log into the SFTP serve. If you set it to PATH, the user + // will see the absolute Amazon S3 bucket paths as is in their SFTP clients. + // If you set it LOGICAL, you will need to provide mappings in the HomeDirectoryMappings + // for how you want to make S3 paths visible to your user. + HomeDirectoryType *string `type:"string" enum:"HomeDirectoryType"` + // Allows you to supply a scope-down policy for your user so you can use the // same AWS Identity and Access Management (IAM) role across multiple users. // The policy scopes down user access to portions of your Amazon S3 bucket. @@ -4084,13 +4408,13 @@ type UpdateUserInput struct { // S3 bucket or buckets. The IAM role should also contain a trust relationship // that allows the Secure File Transfer Protocol (SFTP) server to access your // resources when servicing your SFTP user's transfer requests. - Role *string `type:"string"` + Role *string `min:"20" type:"string"` // A system-assigned unique identifier for an SFTP server instance that the // user account is assigned to. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` // A unique string that identifies a user and is associated with a server as // specified by the ServerId. This is the string that will be used by your user @@ -4099,7 +4423,7 @@ type UpdateUserInput struct { // A-Z, 0-9, underscore, and hyphen. The user name can't start with a hyphen. // // UserName is a required field - UserName *string `type:"string" required:"true"` + UserName *string `min:"3" type:"string" required:"true"` } // String returns the string representation @@ -4115,12 +4439,34 @@ func (s UpdateUserInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. 
func (s *UpdateUserInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "UpdateUserInput"} + if s.HomeDirectoryMappings != nil && len(s.HomeDirectoryMappings) < 1 { + invalidParams.Add(request.NewErrParamMinLen("HomeDirectoryMappings", 1)) + } + if s.Role != nil && len(*s.Role) < 20 { + invalidParams.Add(request.NewErrParamMinLen("Role", 20)) + } if s.ServerId == nil { invalidParams.Add(request.NewErrParamRequired("ServerId")) } + if s.ServerId != nil && len(*s.ServerId) < 19 { + invalidParams.Add(request.NewErrParamMinLen("ServerId", 19)) + } if s.UserName == nil { invalidParams.Add(request.NewErrParamRequired("UserName")) } + if s.UserName != nil && len(*s.UserName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 3)) + } + if s.HomeDirectoryMappings != nil { + for i, v := range s.HomeDirectoryMappings { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "HomeDirectoryMappings", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -4134,6 +4480,18 @@ func (s *UpdateUserInput) SetHomeDirectory(v string) *UpdateUserInput { return s } +// SetHomeDirectoryMappings sets the HomeDirectoryMappings field's value. +func (s *UpdateUserInput) SetHomeDirectoryMappings(v []*HomeDirectoryMapEntry) *UpdateUserInput { + s.HomeDirectoryMappings = v + return s +} + +// SetHomeDirectoryType sets the HomeDirectoryType field's value. +func (s *UpdateUserInput) SetHomeDirectoryType(v string) *UpdateUserInput { + s.HomeDirectoryType = &v + return s +} + // SetPolicy sets the Policy field's value. func (s *UpdateUserInput) SetPolicy(v string) *UpdateUserInput { s.Policy = &v @@ -4167,13 +4525,13 @@ type UpdateUserOutput struct { // user account is assigned to. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` // The unique identifier for a user that is assigned to the SFTP server instance // that was specified in the request. // // UserName is a required field - UserName *string `type:"string" required:"true"` + UserName *string `min:"3" type:"string" required:"true"` } // String returns the string representation @@ -4206,6 +4564,14 @@ const ( EndpointTypeVpcEndpoint = "VPC_ENDPOINT" ) +const ( + // HomeDirectoryTypePath is a HomeDirectoryType enum value + HomeDirectoryTypePath = "PATH" + + // HomeDirectoryTypeLogical is a HomeDirectoryType enum value + HomeDirectoryTypeLogical = "LOGICAL" +) + // Returns information related to the type of user authentication that is in // use for a server's users. For SERVICE_MANAGED authentication, the Secure // Shell (SSH) public keys are stored with a user on an SFTP server instance. diff --git a/vendor/github.com/aws/aws-sdk-go/service/transfer/errors.go b/vendor/github.com/aws/aws-sdk-go/service/transfer/errors.go index 0734c873b55..60b6a6269fb 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/transfer/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/transfer/errors.go @@ -41,4 +41,12 @@ const ( // // The request has failed because the AWS Transfer for SFTP service is not available. ErrCodeServiceUnavailableException = "ServiceUnavailableException" + + // ErrCodeThrottlingException for service response error code + // "ThrottlingException". + // + // The request was denied due to request throttling. 
+ // + // HTTP Status Code: 400 + ErrCodeThrottlingException = "ThrottlingException" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/transfer/service.go b/vendor/github.com/aws/aws-sdk-go/service/transfer/service.go index 0fcea8665a0..90791826dac 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/transfer/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/transfer/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *Transfer { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "transfer" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Transfer { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Transfer { svc := &Transfer{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-11-05", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/waf/service.go b/vendor/github.com/aws/aws-sdk-go/service/waf/service.go index 09bf43d9eeb..81b9b1c93d0 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/waf/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/waf/service.go @@ -46,11 +46,11 @@ const ( // svc := waf.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *WAF { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *WAF { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *WAF { svc := &WAF{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-08-24", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/wafregional/service.go b/vendor/github.com/aws/aws-sdk-go/service/wafregional/service.go index 3a267ae6360..1eeabd40f0f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/wafregional/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/wafregional/service.go @@ -46,11 +46,11 @@ const ( // svc := wafregional.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *WAFRegional { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *WAFRegional { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *WAFRegional { svc := &WAFRegional{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2016-11-28", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/worklink/service.go b/vendor/github.com/aws/aws-sdk-go/service/worklink/service.go index c8a7fc0067e..5fae1688d08 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/worklink/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/worklink/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *WorkLink { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "worklink" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *WorkLink { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *WorkLink { svc := &WorkLink{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-09-25", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/workspaces/api.go b/vendor/github.com/aws/aws-sdk-go/service/workspaces/api.go index d48d30c8b8d..a785c950d76 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/workspaces/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/workspaces/api.go @@ -3907,7 +3907,16 @@ type DefaultWorkspaceCreationProperties struct { // The organizational unit (OU) in the directory for the WorkSpace machine accounts. DefaultOu *string `type:"string"` - // The public IP address to attach to all WorkSpaces that are created or rebuilt. + // Specifies whether to automatically assign a public IP address to WorkSpaces + // in this directory by default. If enabled, the public IP address allows outbound + // internet access from your WorkSpaces when you’re using an internet gateway + // in the Amazon VPC in which your WorkSpaces are located. If you're using a + // Network Address Translation (NAT) gateway for outbound internet access from + // your VPC, or if your WorkSpaces are in public subnets and you manually assign + // them Elastic IP addresses, you should disable this setting. This setting + // applies to new WorkSpaces that you launch or to existing WorkSpaces that + // you rebuild. For more information, see Configure a VPC for Amazon WorkSpaces + // (https://docs.aws.amazon.com/workspaces/latest/adminguide/amazon-workspaces-vpc.html). EnableInternetAccess *bool `type:"boolean"` // Specifies whether the directory is enabled for Amazon WorkDocs. 
@@ -4791,7 +4800,7 @@ type DescribeWorkspaceSnapshotsOutput struct { _ struct{} `type:"structure"` // Information about the snapshots that can be used to rebuild a WorkSpace. - // These snapshots include the root volume. + // These snapshots include the user volume. RebuildSnapshots []*Snapshot `type:"list"` // Information about the snapshots that can be used to restore a WorkSpace. @@ -7152,7 +7161,7 @@ type WorkspaceProperties struct { RunningMode *string `type:"string" enum:"RunningMode"` // The time after a user logs off when WorkSpaces are automatically stopped. - // Configured in 60 minute intervals. + // Configured in 60-minute intervals. RunningModeAutoStopTimeoutInMinutes *int64 `type:"integer"` // The size of the user storage. diff --git a/vendor/github.com/aws/aws-sdk-go/service/workspaces/service.go b/vendor/github.com/aws/aws-sdk-go/service/workspaces/service.go index 38e1cc2ee1f..63ae2bf7457 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/workspaces/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/workspaces/service.go @@ -46,11 +46,11 @@ const ( // svc := workspaces.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *WorkSpaces { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *WorkSpaces { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *WorkSpaces { svc := &WorkSpaces{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-04-08", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/xray/service.go b/vendor/github.com/aws/aws-sdk-go/service/xray/service.go index fdc5ea32958..9a34ccedc9b 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/xray/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/xray/service.go @@ -46,11 +46,11 @@ const ( // svc := xray.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *XRay { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *XRay { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *XRay { svc := &XRay{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2016-04-12", }, diff --git a/vendor/modules.txt b/vendor/modules.txt index 73f8224c22c..25315a1fd39 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -21,7 +21,7 @@ github.com/apparentlymart/go-cidr/cidr github.com/apparentlymart/go-textseg/textseg # github.com/armon/go-radix v1.0.0 github.com/armon/go-radix -# github.com/aws/aws-sdk-go v1.25.10 +# github.com/aws/aws-sdk-go v1.25.21 github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/arn github.com/aws/aws-sdk-go/aws/awserr From 6d98ab2e64869f8e1a23ce5ab4c8bc9ebd1aa4ab Mon Sep 17 00:00:00 2001 From: Anatoli Iariomenco <33262478+qlikcoe@users.noreply.github.com> Date: Mon, 28 Oct 2019 20:51:45 -0400 Subject: [PATCH 40/55] docs: fix cidr_block reference --- website/docs/r/elasticsearch_domain.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/elasticsearch_domain.html.markdown b/website/docs/r/elasticsearch_domain.html.markdown index bab2fcb0c0a..2b054de2a59 100644 --- a/website/docs/r/elasticsearch_domain.html.markdown +++ b/website/docs/r/elasticsearch_domain.html.markdown @@ -147,7 +147,7 @@ resource "aws_security_group" "es" { protocol = "tcp" cidr_blocks = [ - "${data.aws_vpc.selected.cidr_blocks}", + "${data.aws_vpc.selected.cidr_block}", ] } } From 3dc7d3c758cefac83dc8ece1ffd3212c75ce3fde Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Mon, 28 Oct 2019 23:49:06 -0400 Subject: [PATCH 41/55] resource/aws_dx_gateway_association: Handle nil dx_gateway_association_id in StateUpgraders, update acceptance testing for StateUpgraders Output from acceptance testing: ``` --- PASS: TestAccAwsDxGatewayAssociation_basicTransitGatewaySingleAccount (929.91s) --- PASS: TestAccAwsDxGatewayAssociation_multiVpnGatewaysSingleAccount (1000.41s) --- PASS: TestAccAwsDxGatewayAssociation_basicTransitGatewayCrossAccount (1092.54s) --- PASS: TestAccAwsDxGatewayAssociation_basicVpnGatewayCrossAccount (1122.39s) --- PASS: TestAccAwsDxGatewayAssociation_deprecatedSingleAccount (1125.61s) --- PASS: TestAccAwsDxGatewayAssociation_basicVpnGatewaySingleAccount (1223.48s) --- PASS: TestAccAwsDxGatewayAssociation_allowedPrefixesVpnGatewaySingleAccount (1324.25s) --- PASS: TestAccAwsDxGatewayAssociation_allowedPrefixesVpnGatewayCrossAccount (1325.74s) ``` --- ...urce_aws_dx_gateway_association_migrate.go | 2 +- ...esource_aws_dx_gateway_association_test.go | 22 ++++++++----------- 2 files changed, 10 insertions(+), 14 deletions(-) diff --git a/aws/resource_aws_dx_gateway_association_migrate.go b/aws/resource_aws_dx_gateway_association_migrate.go index b5555b5297d..8986f1d80ac 100644 --- a/aws/resource_aws_dx_gateway_association_migrate.go +++ b/aws/resource_aws_dx_gateway_association_migrate.go @@ -81,7 +81,7 @@ func resourceAwsDxGatewayAssociationStateUpgradeV0(rawState map[string]interface log.Println("[INFO] Found Direct Connect gateway association state v0; migrating to v1") // dx_gateway_association_id was introduced in v2.8.0. Handle the case where it's not yet present. 
- if _, ok := rawState["dx_gateway_association_id"]; !ok { + if v, ok := rawState["dx_gateway_association_id"]; !ok || v == nil { resp, err := conn.DescribeDirectConnectGatewayAssociations(&directconnect.DescribeDirectConnectGatewayAssociationsInput{ DirectConnectGatewayId: aws.String(rawState["dx_gateway_id"].(string)), VirtualGatewayId: aws.String(rawState["vpn_gateway_id"].(string)), diff --git a/aws/resource_aws_dx_gateway_association_test.go b/aws/resource_aws_dx_gateway_association_test.go index 4840cf933c3..897eb9e400a 100644 --- a/aws/resource_aws_dx_gateway_association_test.go +++ b/aws/resource_aws_dx_gateway_association_test.go @@ -206,7 +206,7 @@ func TestAccAwsDxGatewayAssociation_deprecatedSingleAccount(t *testing.T) { testAccCheckResourceAttrAccountID(resourceName, "dx_gateway_owner_account_id"), resource.TestCheckResourceAttr(resourceName, "allowed_prefixes.#", "1"), resource.TestCheckResourceAttr(resourceName, "allowed_prefixes.1216997074", "10.255.255.0/28"), - testAccCheckAwsDxGatewayAssociationMigrateState(resourceName), + testAccCheckAwsDxGatewayAssociationStateUpgradeV0(resourceName), ), }, }, @@ -523,7 +523,8 @@ func testAccCheckAwsDxGatewayAssociationExists(name string) resource.TestCheckFu } } -func testAccCheckAwsDxGatewayAssociationMigrateState(name string) resource.TestCheckFunc { +// Perform check in acceptance testing as this StateUpgrader requires an API call +func testAccCheckAwsDxGatewayAssociationStateUpgradeV0(name string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[name] if !ok { @@ -533,23 +534,18 @@ func testAccCheckAwsDxGatewayAssociationMigrateState(name string) resource.TestC return fmt.Errorf("No ID is set") } - is := &terraform.InstanceState{ - ID: rs.Primary.ID, - Attributes: map[string]string{ - "dx_gateway_id": rs.Primary.Attributes["dx_gateway_id"], - "vpn_gateway_id": rs.Primary.Attributes["vpn_gateway_id"], - }, + rawState := map[string]interface{}{ + "dx_gateway_id": rs.Primary.Attributes["dx_gateway_id"], + "vpn_gateway_id": rs.Primary.Attributes["vpn_gateway_id"], } - is, err := resourceAwsDxGatewayAssociation().MigrateState(0, is, testAccProvider.Meta()) + updatedRawState, err := resourceAwsDxGatewayAssociationStateUpgradeV0(rawState, testAccProvider.Meta()) if err != nil { return err } - if is.Attributes["dx_gateway_association_id"] != rs.Primary.Attributes["dx_gateway_association_id"] { - return fmt.Errorf("Invalid dx_gateway_association_id attribute in migrated state. Expected %s, got %s", - rs.Primary.Attributes["dx_gateway_association_id"], - is.Attributes["dx_gateway_association_id"]) + if got, want := updatedRawState["dx_gateway_association_id"], rs.Primary.Attributes["dx_gateway_association_id"]; got != want { + return fmt.Errorf("Invalid dx_gateway_association_id attribute in migrated state. 
Expected %s, got %s", want, got) } return nil From 175a5441b52ca3414db1c5f53a8d9fafaf6d770b Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Mon, 28 Oct 2019 23:51:44 -0400 Subject: [PATCH 42/55] Update CHANGELOG for #8776 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 47f77524e25..ba4e1bfa59e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ BUG FIXES: * resource/aws_cloudhsm_v2_cluster: Ensure multiple tag configurations are applied correctly [GH-10309] * resource/aws_cloudhsm_v2_cluster: Perform drift detection with tags [GH-10309] +* resource/aws_dx_gateway_association: Fix backwards compatibility issue with missing `dx_gateway_association_id` attribute [GH-8776] ## 2.33.0 (October 17, 2019) From 93d023fb6518b33b6da3e7a5b5d52d2f1c8784b2 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Tue, 29 Oct 2019 00:36:21 -0400 Subject: [PATCH 43/55] Update CHANGELOG for #10641 --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ba4e1bfa59e..6e0ac4f0dec 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,8 @@ BUG FIXES: +* resource/aws_backup_plan: Correctly handle changes to `recovery_point_tags` arguments [GH-10641] +* resource/aws_backup_plan: Prevent `diffs didn't match` errors with `rule` configuration blocks [GH-10641] * resource/aws_cloudhsm_v2_cluster: Ensure multiple tag configurations are applied correctly [GH-10309] * resource/aws_cloudhsm_v2_cluster: Perform drift detection with tags [GH-10309] * resource/aws_dx_gateway_association: Fix backwards compatibility issue with missing `dx_gateway_association_id` attribute [GH-8776] From afc38e4020cf65f61077f2f093e65e7abb756cd7 Mon Sep 17 00:00:00 2001 From: Ryn Daniels Date: Tue, 29 Oct 2019 10:09:10 +0100 Subject: [PATCH 44/55] Import test refactor for opsworks resources --- ...resource_aws_opsworks_custom_layer_test.go | 167 ++++++------------ aws/resource_aws_opsworks_instance_test.go | 108 ++++------- aws/resource_aws_opsworks_stack_test.go | 119 +++++++------ 3 files changed, 151 insertions(+), 243 deletions(-) diff --git a/aws/resource_aws_opsworks_custom_layer_test.go b/aws/resource_aws_opsworks_custom_layer_test.go index 7f1890e4691..0b548e29165 100644 --- a/aws/resource_aws_opsworks_custom_layer_test.go +++ b/aws/resource_aws_opsworks_custom_layer_test.go @@ -14,11 +14,11 @@ import ( ) // These tests assume the existence of predefined Opsworks IAM roles named `aws-opsworks-ec2-role` -// and `aws-opsworks-service-role`. +// and `aws-opsworks-service-role`, and Opsworks stacks named `tf-acc`. 
-func TestAccAWSOpsworksCustomLayer_importBasic(t *testing.T) { +func TestAccAWSOpsworksCustomLayer_basic(t *testing.T) { name := acctest.RandString(10) - + var opslayer opsworks.Layer resourceName := "aws_opsworks_custom_layer.tf-acc" resource.ParallelTest(t, resource.TestCase{ @@ -28,8 +28,24 @@ func TestAccAWSOpsworksCustomLayer_importBasic(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccAwsOpsworksCustomLayerConfigVpcCreate(name), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSOpsworksCustomLayerExists(resourceName, &opslayer), + resource.TestCheckResourceAttr(resourceName, "name", name), + resource.TestCheckResourceAttr(resourceName, "auto_assign_elastic_ips", "false"), + resource.TestCheckResourceAttr(resourceName, "auto_healing", "true"), + resource.TestCheckResourceAttr(resourceName, "drain_elb_on_shutdown", "true"), + resource.TestCheckResourceAttr(resourceName, "instance_shutdown_timeout", "300"), + resource.TestCheckResourceAttr(resourceName, "custom_security_group_ids.#", "2"), + resource.TestCheckResourceAttr(resourceName, "system_packages.#", "2"), + resource.TestCheckResourceAttr(resourceName, "system_packages.1368285564", "git"), + resource.TestCheckResourceAttr(resourceName, "system_packages.2937857443", "golang"), + resource.TestCheckResourceAttr(resourceName, "ebs_volume.#", "1"), + resource.TestCheckResourceAttr(resourceName, "ebs_volume.3575749636.type", "gp2"), + resource.TestCheckResourceAttr(resourceName, "ebs_volume.3575749636.number_of_disks", "2"), + resource.TestCheckResourceAttr(resourceName, "ebs_volume.3575749636.mount_point", "/home"), + resource.TestCheckResourceAttr(resourceName, "ebs_volume.3575749636.size", "100"), + ), }, - { ResourceName: resourceName, ImportState: true, @@ -39,9 +55,11 @@ func TestAccAWSOpsworksCustomLayer_importBasic(t *testing.T) { }) } -func TestAccAWSOpsworksCustomLayer_basic(t *testing.T) { +func TestAccAWSOpsworksCustomLayer_noVPC(t *testing.T) { stackName := fmt.Sprintf("tf-%d", acctest.RandInt()) var opslayer opsworks.Layer + resourceName := "aws_opsworks_custom_layer.tf-acc" + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -50,116 +68,47 @@ func TestAccAWSOpsworksCustomLayer_basic(t *testing.T) { { Config: testAccAwsOpsworksCustomLayerConfigNoVpcCreate(stackName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSOpsworksCustomLayerExists( - "aws_opsworks_custom_layer.tf-acc", &opslayer), + testAccCheckAWSOpsworksCustomLayerExists(resourceName, &opslayer), testAccCheckAWSOpsworksCreateLayerAttributes(&opslayer, stackName), - resource.TestCheckResourceAttr( - "aws_opsworks_custom_layer.tf-acc", "name", stackName, - ), - resource.TestCheckResourceAttr( - "aws_opsworks_custom_layer.tf-acc", "auto_assign_elastic_ips", "false", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_custom_layer.tf-acc", "auto_healing", "true", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_custom_layer.tf-acc", "drain_elb_on_shutdown", "true", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_custom_layer.tf-acc", "instance_shutdown_timeout", "300", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_custom_layer.tf-acc", "custom_security_group_ids.#", "2", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_custom_layer.tf-acc", "system_packages.#", "2", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_custom_layer.tf-acc", "system_packages.1368285564", "git", - ), - resource.TestCheckResourceAttr( - 
"aws_opsworks_custom_layer.tf-acc", "system_packages.2937857443", "golang", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_custom_layer.tf-acc", "ebs_volume.#", "1", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_custom_layer.tf-acc", "ebs_volume.3575749636.type", "gp2", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_custom_layer.tf-acc", "ebs_volume.3575749636.number_of_disks", "2", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_custom_layer.tf-acc", "ebs_volume.3575749636.mount_point", "/home", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_custom_layer.tf-acc", "ebs_volume.3575749636.size", "100", - ), + resource.TestCheckResourceAttr(resourceName, "name", stackName), + resource.TestCheckResourceAttr(resourceName, "auto_assign_elastic_ips", "false"), + resource.TestCheckResourceAttr(resourceName, "auto_healing", "true"), + resource.TestCheckResourceAttr(resourceName, "drain_elb_on_shutdown", "true"), + resource.TestCheckResourceAttr(resourceName, "instance_shutdown_timeout", "300"), + resource.TestCheckResourceAttr(resourceName, "custom_security_group_ids.#", "2"), + resource.TestCheckResourceAttr(resourceName, "system_packages.#", "2"), + resource.TestCheckResourceAttr(resourceName, "system_packages.1368285564", "git"), + resource.TestCheckResourceAttr(resourceName, "system_packages.2937857443", "golang"), + resource.TestCheckResourceAttr(resourceName, "ebs_volume.#", "1"), + resource.TestCheckResourceAttr(resourceName, "ebs_volume.3575749636.type", "gp2"), + resource.TestCheckResourceAttr(resourceName, "ebs_volume.3575749636.number_of_disks", "2"), + resource.TestCheckResourceAttr(resourceName, "ebs_volume.3575749636.mount_point", "/home"), + resource.TestCheckResourceAttr(resourceName, "ebs_volume.3575749636.size", "100"), ), }, { Config: testAccAwsOpsworksCustomLayerConfigUpdate(stackName), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "aws_opsworks_custom_layer.tf-acc", "name", stackName, - ), - resource.TestCheckResourceAttr( - "aws_opsworks_custom_layer.tf-acc", "drain_elb_on_shutdown", "false", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_custom_layer.tf-acc", "instance_shutdown_timeout", "120", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_custom_layer.tf-acc", "custom_security_group_ids.#", "3", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_custom_layer.tf-acc", "system_packages.#", "3", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_custom_layer.tf-acc", "system_packages.1368285564", "git", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_custom_layer.tf-acc", "system_packages.2937857443", "golang", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_custom_layer.tf-acc", "system_packages.4101929740", "subversion", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_custom_layer.tf-acc", "ebs_volume.#", "2", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_custom_layer.tf-acc", "ebs_volume.3575749636.type", "gp2", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_custom_layer.tf-acc", "ebs_volume.3575749636.number_of_disks", "2", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_custom_layer.tf-acc", "ebs_volume.3575749636.mount_point", "/home", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_custom_layer.tf-acc", "ebs_volume.3575749636.size", "100", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_custom_layer.tf-acc", "ebs_volume.1266957920.type", "io1", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_custom_layer.tf-acc", 
"ebs_volume.1266957920.number_of_disks", "4", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_custom_layer.tf-acc", "ebs_volume.1266957920.mount_point", "/var", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_custom_layer.tf-acc", "ebs_volume.1266957920.size", "100", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_custom_layer.tf-acc", "ebs_volume.1266957920.raid_level", "1", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_custom_layer.tf-acc", "ebs_volume.1266957920.iops", "3000", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_custom_layer.tf-acc", "custom_json", `{"layer_key":"layer_value2"}`, - ), + resource.TestCheckResourceAttr(resourceName, "name", stackName), + resource.TestCheckResourceAttr(resourceName, "drain_elb_on_shutdown", "false"), + resource.TestCheckResourceAttr(resourceName, "instance_shutdown_timeout", "120"), + resource.TestCheckResourceAttr(resourceName, "custom_security_group_ids.#", "3"), + resource.TestCheckResourceAttr(resourceName, "system_packages.#", "3"), + resource.TestCheckResourceAttr(resourceName, "system_packages.1368285564", "git"), + resource.TestCheckResourceAttr(resourceName, "system_packages.2937857443", "golang"), + resource.TestCheckResourceAttr(resourceName, "system_packages.4101929740", "subversion"), + resource.TestCheckResourceAttr(resourceName, "ebs_volume.#", "2"), + resource.TestCheckResourceAttr(resourceName, "ebs_volume.3575749636.type", "gp2"), + resource.TestCheckResourceAttr(resourceName, "ebs_volume.3575749636.number_of_disks", "2"), + resource.TestCheckResourceAttr(resourceName, "ebs_volume.3575749636.mount_point", "/home"), + resource.TestCheckResourceAttr(resourceName, "ebs_volume.3575749636.size", "100"), + resource.TestCheckResourceAttr(resourceName, "ebs_volume.1266957920.type", "io1"), + resource.TestCheckResourceAttr(resourceName, "ebs_volume.1266957920.number_of_disks", "4"), + resource.TestCheckResourceAttr(resourceName, "ebs_volume.1266957920.mount_point", "/var"), + resource.TestCheckResourceAttr(resourceName, "ebs_volume.1266957920.size", "100"), + resource.TestCheckResourceAttr(resourceName, "ebs_volume.1266957920.raid_level", "1"), + resource.TestCheckResourceAttr(resourceName, "ebs_volume.1266957920.iops", "3000"), + resource.TestCheckResourceAttr(resourceName, "custom_json", `{"layer_key":"layer_value2"}`), ), }, }, diff --git a/aws/resource_aws_opsworks_instance_test.go b/aws/resource_aws_opsworks_instance_test.go index 23d6ff2805b..f1cdc8c3278 100644 --- a/aws/resource_aws_opsworks_instance_test.go +++ b/aws/resource_aws_opsworks_instance_test.go @@ -12,8 +12,9 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/terraform" ) -func TestAccAWSOpsworksInstance_importBasic(t *testing.T) { +func TestAccAWSOpsworksInstance_basic(t *testing.T) { stackName := fmt.Sprintf("tf-%d", acctest.RandInt()) + var opsinst opsworks.Instance resourceName := "aws_opsworks_instance.tf-acc" resource.ParallelTest(t, resource.TestCase{ @@ -23,85 +24,37 @@ func TestAccAWSOpsworksInstance_importBasic(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccAwsOpsworksInstanceConfigCreate(stackName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSOpsworksInstanceExists(resourceName, &opsinst), + testAccCheckAWSOpsworksInstanceAttributes(&opsinst), + resource.TestCheckResourceAttr(resourceName, "hostname", "tf-acc1"), + resource.TestCheckResourceAttr(resourceName, "instance_type", "t2.micro"), + resource.TestCheckResourceAttr(resourceName, "state", "stopped"), + 
resource.TestCheckResourceAttr(resourceName, "layer_ids.#", "1"), + resource.TestCheckResourceAttr(resourceName, "install_updates_on_boot", "true"), + resource.TestCheckResourceAttr(resourceName, "architecture", "x86_64"), + resource.TestCheckResourceAttr(resourceName, "tenancy", "default"), + resource.TestCheckResourceAttr(resourceName, "os", "Amazon Linux 2016.09"), // inherited from opsworks_stack_test + resource.TestCheckResourceAttr(resourceName, "root_device_type", "ebs"), // inherited from opsworks_stack_test + resource.TestCheckResourceAttr(resourceName, "availability_zone", "us-west-2a"), // inherited from opsworks_stack_test + ), }, - { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{"state"}, //state is something we pass to the API and get back as status :( }, - }, - }) -} - -func TestAccAWSOpsworksInstance(t *testing.T) { - stackName := fmt.Sprintf("tf-%d", acctest.RandInt()) - var opsinst opsworks.Instance - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAwsOpsworksInstanceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAwsOpsworksInstanceConfigCreate(stackName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSOpsworksInstanceExists( - "aws_opsworks_instance.tf-acc", &opsinst), - testAccCheckAWSOpsworksInstanceAttributes(&opsinst), - resource.TestCheckResourceAttr( - "aws_opsworks_instance.tf-acc", "hostname", "tf-acc1", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_instance.tf-acc", "instance_type", "t2.micro", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_instance.tf-acc", "state", "stopped", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_instance.tf-acc", "layer_ids.#", "1", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_instance.tf-acc", "install_updates_on_boot", "true", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_instance.tf-acc", "architecture", "x86_64", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_instance.tf-acc", "tenancy", "default", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_instance.tf-acc", "os", "Amazon Linux 2016.09", // inherited from opsworks_stack_test - ), - resource.TestCheckResourceAttr( - "aws_opsworks_instance.tf-acc", "root_device_type", "ebs", // inherited from opsworks_stack_test - ), - resource.TestCheckResourceAttr( - "aws_opsworks_instance.tf-acc", "availability_zone", "us-west-2a", // inherited from opsworks_stack_test - ), - ), - }, { Config: testAccAwsOpsworksInstanceConfigUpdate(stackName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSOpsworksInstanceExists( - "aws_opsworks_instance.tf-acc", &opsinst), + testAccCheckAWSOpsworksInstanceExists(resourceName, &opsinst), testAccCheckAWSOpsworksInstanceAttributes(&opsinst), - resource.TestCheckResourceAttr( - "aws_opsworks_instance.tf-acc", "hostname", "tf-acc1", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_instance.tf-acc", "instance_type", "t2.small", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_instance.tf-acc", "layer_ids.#", "2", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_instance.tf-acc", "os", "Amazon Linux 2015.09", - ), - resource.TestCheckResourceAttr( - "aws_opsworks_instance.tf-acc", "tenancy", "default", - ), + resource.TestCheckResourceAttr(resourceName, "hostname", "tf-acc1"), + resource.TestCheckResourceAttr(resourceName, "instance_type", "t2.small"), + resource.TestCheckResourceAttr(resourceName, 
"layer_ids.#", "2"), + resource.TestCheckResourceAttr(resourceName, "os", "Amazon Linux 2015.09"), + resource.TestCheckResourceAttr(resourceName, "tenancy", "default"), ), }, }, @@ -110,8 +63,9 @@ func TestAccAWSOpsworksInstance(t *testing.T) { func TestAccAWSOpsworksInstance_UpdateHostNameForceNew(t *testing.T) { stackName := fmt.Sprintf("tf-%d", acctest.RandInt()) - + resourceName := "aws_opsworks_instance.tf-acc" var before, after opsworks.Instance + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -120,15 +74,21 @@ func TestAccAWSOpsworksInstance_UpdateHostNameForceNew(t *testing.T) { { Config: testAccAwsOpsworksInstanceConfigCreate(stackName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSOpsworksInstanceExists("aws_opsworks_instance.tf-acc", &before), - resource.TestCheckResourceAttr("aws_opsworks_instance.tf-acc", "hostname", "tf-acc1"), + testAccCheckAWSOpsworksInstanceExists(resourceName, &before), + resource.TestCheckResourceAttr(resourceName, "hostname", "tf-acc1"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"state"}, + }, { Config: testAccAwsOpsworksInstanceConfigUpdateHostName(stackName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSOpsworksInstanceExists("aws_opsworks_instance.tf-acc", &after), - resource.TestCheckResourceAttr("aws_opsworks_instance.tf-acc", "hostname", "tf-acc2"), + testAccCheckAWSOpsworksInstanceExists(resourceName, &after), + resource.TestCheckResourceAttr(resourceName, "hostname", "tf-acc2"), testAccCheckAwsOpsworksInstanceRecreated(t, &before, &after), ), }, diff --git a/aws/resource_aws_opsworks_stack_test.go b/aws/resource_aws_opsworks_stack_test.go index 54cf9be61e0..fdcb7e7a54e 100644 --- a/aws/resource_aws_opsworks_stack_test.go +++ b/aws/resource_aws_opsworks_stack_test.go @@ -13,36 +13,15 @@ import ( "github.com/aws/aws-sdk-go/service/opsworks" ) -func TestAccAWSOpsworksStack_ImportBasic(t *testing.T) { - name := acctest.RandString(10) - - resourceName := "aws_opsworks_stack.tf-acc" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAwsOpsworksStackDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAwsOpsworksStackConfigVpcCreate(name), - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - /////////////////////////////// //// Tests for the No-VPC case /////////////////////////////// -func TestAccAWSOpsworksStack_NoVpc(t *testing.T) { +func TestAccAWSOpsworksStack_noVpcBasic(t *testing.T) { stackName := fmt.Sprintf("tf-opsworks-acc-%d", acctest.RandInt()) + resourceName := "aws_opsworks_stack.tf-acc" var opsstack opsworks.Stack + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -51,21 +30,25 @@ func TestAccAWSOpsworksStack_NoVpc(t *testing.T) { { Config: testAccAwsOpsworksStackConfigNoVpcCreate(stackName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSOpsworksStackExists( - "aws_opsworks_stack.tf-acc", false, &opsstack), - testAccCheckAWSOpsworksCreateStackAttributes( - &opsstack, "us-east-1a", stackName), - testAccAwsOpsworksStackCheckResourceAttrsCreate( - "us-east-1a", stackName), + testAccCheckAWSOpsworksStackExists(resourceName, false, &opsstack), + testAccCheckAWSOpsworksCreateStackAttributes(&opsstack, "us-east-1a", stackName), + 
testAccAwsOpsworksStackCheckResourceAttrsCreate("us-east-1a", stackName), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } -func TestAccAWSOpsworksStack_NoVpcChangeServiceRoleForceNew(t *testing.T) { +func TestAccAWSOpsworksStack_noVpcChangeServiceRoleForceNew(t *testing.T) { stackName := fmt.Sprintf("tf-opsworks-acc-%d", acctest.RandInt()) + resourceName := "aws_opsworks_stack.tf-acc" var before, after opsworks.Stack + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -74,15 +57,18 @@ func TestAccAWSOpsworksStack_NoVpcChangeServiceRoleForceNew(t *testing.T) { { Config: testAccAwsOpsworksStackConfigNoVpcCreate(stackName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSOpsworksStackExists( - "aws_opsworks_stack.tf-acc", false, &before), + testAccCheckAWSOpsworksStackExists(resourceName, false, &before), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAwsOpsworksStackConfigNoVpcCreateUpdateServiceRole(stackName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSOpsworksStackExists( - "aws_opsworks_stack.tf-acc", false, &after), + testAccCheckAWSOpsworksStackExists(resourceName, false, &after), testAccCheckAWSOpsworksStackRecreated(t, &before, &after), ), }, @@ -90,9 +76,11 @@ func TestAccAWSOpsworksStack_NoVpcChangeServiceRoleForceNew(t *testing.T) { }) } -func TestAccAWSOpsworksStack_Vpc(t *testing.T) { +func TestAccAWSOpsworksStack_vpc(t *testing.T) { stackName := fmt.Sprintf("tf-opsworks-acc-%d", acctest.RandInt()) + resourceName := "aws_opsworks_stack.tf-acc" var opsstack opsworks.Stack + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -101,32 +89,33 @@ func TestAccAWSOpsworksStack_Vpc(t *testing.T) { { Config: testAccAwsOpsworksStackConfigVpcCreate(stackName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSOpsworksStackExists( - "aws_opsworks_stack.tf-acc", true, &opsstack), - testAccCheckAWSOpsworksCreateStackAttributes( - &opsstack, "us-west-2a", stackName), - testAccAwsOpsworksStackCheckResourceAttrsCreate( - "us-west-2a", stackName), + testAccCheckAWSOpsworksStackExists(resourceName, true, &opsstack), + testAccCheckAWSOpsworksCreateStackAttributes(&opsstack, "us-west-2a", stackName), + testAccAwsOpsworksStackCheckResourceAttrsCreate("us-west-2a", stackName), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSOpsworksStackConfigVpcUpdate(stackName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSOpsworksStackExists( - "aws_opsworks_stack.tf-acc", true, &opsstack), - testAccCheckAWSOpsworksUpdateStackAttributes( - &opsstack, "us-west-2a", stackName), - testAccAwsOpsworksStackCheckResourceAttrsUpdate( - "us-west-2a", stackName), + testAccCheckAWSOpsworksStackExists(resourceName, true, &opsstack), + testAccCheckAWSOpsworksUpdateStackAttributes(&opsstack, "us-west-2a", stackName), + testAccAwsOpsworksStackCheckResourceAttrsUpdate("us-west-2a", stackName), ), }, }, }) } -func TestAccAWSOpsworksStack_NoVpcCreateTags(t *testing.T) { +func TestAccAWSOpsworksStack_noVpcCreateTags(t *testing.T) { stackName := fmt.Sprintf("tf-opsworks-acc-%d", acctest.RandInt()) + resourceName := "aws_opsworks_stack.tf-acc" var opsstack opsworks.Stack + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -135,19 
+124,23 @@ func TestAccAWSOpsworksStack_NoVpcCreateTags(t *testing.T) { { Config: testAccAwsOpsworksStackConfigNoVpcCreateTags(stackName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSOpsworksStackExists( - "aws_opsworks_stack.tf-acc", false, &opsstack), - resource.TestCheckResourceAttr("aws_opsworks_stack.tf-acc", "tags.%", "1"), - resource.TestCheckResourceAttr("aws_opsworks_stack.tf-acc", "tags.foo", "bar"), + testAccCheckAWSOpsworksStackExists(resourceName, false, &opsstack), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.foo", "bar"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"tags"}, + }, { Config: testAccAwsOpsworksStackConfigNoVpcUpdateTags(stackName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSOpsworksStackExists( - "aws_opsworks_stack.tf-acc", false, &opsstack), - resource.TestCheckResourceAttr("aws_opsworks_stack.tf-acc", "tags.%", "1"), - resource.TestCheckResourceAttr("aws_opsworks_stack.tf-acc", "tags.wut", "asdf"), + testAccCheckAWSOpsworksStackExists(resourceName, false, &opsstack), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.wut", "asdf"), ), }, }, @@ -157,10 +150,12 @@ func TestAccAWSOpsworksStack_NoVpcCreateTags(t *testing.T) { // Tests the addition of regional endpoints and supporting the classic link used // to create Stack's prior to v0.9.0. // See https://github.com/hashicorp/terraform/issues/12842 -func TestAccAWSOpsWorksStack_classic_endpoints(t *testing.T) { +func TestAccAWSOpsWorksStack_classicEndpoints(t *testing.T) { stackName := fmt.Sprintf("tf-opsworks-acc-%d", acctest.RandInt()) + resourceName := "aws_opsworks_stack.main" rInt := acctest.RandInt() var opsstack opsworks.Stack + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -169,10 +164,14 @@ func TestAccAWSOpsWorksStack_classic_endpoints(t *testing.T) { { Config: testAccAwsOpsWorksStack_classic_endpoint(stackName, rInt), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSOpsworksStackExists( - "aws_opsworks_stack.main", false, &opsstack), + testAccCheckAWSOpsworksStackExists(resourceName, false, &opsstack), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, // Ensure that changing to us-west-2 region results in no plan { Config: testAccAwsOpsWorksStack_regional_endpoint(stackName, rInt), From 56ffd3c99cb4909633ecb091df2674d746222f8f Mon Sep 17 00:00:00 2001 From: Ryn Daniels Date: Tue, 29 Oct 2019 10:14:03 +0100 Subject: [PATCH 45/55] use resourceName everywhere --- ..._aws_elasticache_replication_group_test.go | 96 +++++++++---------- 1 file changed, 48 insertions(+), 48 deletions(-) diff --git a/aws/resource_aws_elasticache_replication_group_test.go b/aws/resource_aws_elasticache_replication_group_test.go index 9d22d991ef8..29ea45e41d7 100644 --- a/aws/resource_aws_elasticache_replication_group_test.go +++ b/aws/resource_aws_elasticache_replication_group_test.go @@ -75,15 +75,15 @@ func TestAccAWSElasticacheReplicationGroup_basic(t *testing.T) { { Config: testAccAWSElasticacheReplicationGroupConfig(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.test", &rg), + testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &rg), resource.TestCheckResourceAttr( - 
"aws_elasticache_replication_group.test", "cluster_mode.#", "0"), + resourceName, "cluster_mode.#", "0"), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.test", "number_cache_clusters", "2"), + resourceName, "number_cache_clusters", "2"), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.test", "member_clusters.#", "2"), + resourceName, "member_clusters.#", "2"), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.test", "auto_minor_version_upgrade", "false"), + resourceName, "auto_minor_version_upgrade", "false"), ), }, { @@ -109,9 +109,9 @@ func TestAccAWSElasticacheReplicationGroup_Uppercase(t *testing.T) { { Config: testAccAWSElasticacheReplicationGroupConfig_Uppercase(strings.ToUpper(rName)), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.test", &rg), + testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &rg), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.test", "replication_group_id", rName), + resourceName, "replication_group_id", rName), ), }, { @@ -141,13 +141,13 @@ func TestAccAWSElasticacheReplicationGroup_updateDescription(t *testing.T) { { Config: testAccAWSElasticacheReplicationGroupConfig(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.test", &rg), + testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &rg), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.test", "number_cache_clusters", "2"), + resourceName, "number_cache_clusters", "2"), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.test", "replication_group_description", "test description"), + resourceName, "replication_group_description", "test description"), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.test", "auto_minor_version_upgrade", "false"), + resourceName, "auto_minor_version_upgrade", "false"), ), }, { @@ -159,13 +159,13 @@ func TestAccAWSElasticacheReplicationGroup_updateDescription(t *testing.T) { { Config: testAccAWSElasticacheReplicationGroupConfigUpdatedDescription(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.test", &rg), + testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &rg), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.test", "number_cache_clusters", "2"), + resourceName, "number_cache_clusters", "2"), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.test", "replication_group_description", "updated description"), + resourceName, "replication_group_description", "updated description"), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.test", "auto_minor_version_upgrade", "true"), + resourceName, "auto_minor_version_upgrade", "true"), ), }, }, @@ -189,9 +189,9 @@ func TestAccAWSElasticacheReplicationGroup_updateMaintenanceWindow(t *testing.T) { Config: testAccAWSElasticacheReplicationGroupConfig(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.test", &rg), + testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &rg), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.test", "maintenance_window", "tue:06:30-tue:07:30"), + resourceName, "maintenance_window", "tue:06:30-tue:07:30"), ), }, { @@ -203,9 +203,9 @@ func 
TestAccAWSElasticacheReplicationGroup_updateMaintenanceWindow(t *testing.T) { Config: testAccAWSElasticacheReplicationGroupConfigUpdatedMaintenanceWindow(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.test", &rg), + testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &rg), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.test", "maintenance_window", "wed:03:00-wed:06:00"), + resourceName, "maintenance_window", "wed:03:00-wed:06:00"), ), }, }, @@ -229,11 +229,11 @@ func TestAccAWSElasticacheReplicationGroup_updateNodeSize(t *testing.T) { { Config: testAccAWSElasticacheReplicationGroupConfig(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.test", &rg), + testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &rg), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.test", "number_cache_clusters", "2"), + resourceName, "number_cache_clusters", "2"), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.test", "node_type", "cache.m1.small"), + resourceName, "node_type", "cache.m1.small"), ), }, { @@ -245,11 +245,11 @@ func TestAccAWSElasticacheReplicationGroup_updateNodeSize(t *testing.T) { { Config: testAccAWSElasticacheReplicationGroupConfigUpdatedNodeSize(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.test", &rg), + testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &rg), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.test", "number_cache_clusters", "2"), + resourceName, "number_cache_clusters", "2"), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.test", "node_type", "cache.m1.medium"), + resourceName, "node_type", "cache.m1.medium"), ), }, }, @@ -305,11 +305,11 @@ func TestAccAWSElasticacheReplicationGroup_vpc(t *testing.T) { { Config: testAccAWSElasticacheReplicationGroupInVPCConfig, Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.test", &rg), + testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &rg), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.test", "number_cache_clusters", "1"), + resourceName, "number_cache_clusters", "1"), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.test", "auto_minor_version_upgrade", "false"), + resourceName, "auto_minor_version_upgrade", "false"), ), }, { @@ -334,17 +334,17 @@ func TestAccAWSElasticacheReplicationGroup_multiAzInVpc(t *testing.T) { { Config: testAccAWSElasticacheReplicationGroupMultiAZInVPCConfig, Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.test", &rg), + testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &rg), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.test", "number_cache_clusters", "2"), + resourceName, "number_cache_clusters", "2"), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.test", "automatic_failover_enabled", "true"), + resourceName, "automatic_failover_enabled", "true"), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.test", "snapshot_window", "02:00-03:00"), + resourceName, "snapshot_window", "02:00-03:00"), resource.TestCheckResourceAttr( - 
"aws_elasticache_replication_group.test", "snapshot_retention_limit", "7"), + resourceName, "snapshot_retention_limit", "7"), resource.TestCheckResourceAttrSet( - "aws_elasticache_replication_group.test", "primary_endpoint_address"), + resourceName, "primary_endpoint_address"), ), }, { @@ -369,17 +369,17 @@ func TestAccAWSElasticacheReplicationGroup_redisClusterInVpc2(t *testing.T) { { Config: testAccAWSElasticacheReplicationGroupRedisClusterInVPCConfig, Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.test", &rg), + testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &rg), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.test", "number_cache_clusters", "2"), + resourceName, "number_cache_clusters", "2"), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.test", "automatic_failover_enabled", "false"), + resourceName, "automatic_failover_enabled", "false"), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.test", "snapshot_window", "02:00-03:00"), + resourceName, "snapshot_window", "02:00-03:00"), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.test", "snapshot_retention_limit", "7"), + resourceName, "snapshot_retention_limit", "7"), resource.TestCheckResourceAttrSet( - "aws_elasticache_replication_group.test", "primary_endpoint_address"), + resourceName, "primary_endpoint_address"), ), }, { @@ -508,9 +508,9 @@ func TestAccAWSElasticacheReplicationGroup_enableSnapshotting(t *testing.T) { { Config: testAccAWSElasticacheReplicationGroupConfig(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.test", &rg), + testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &rg), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.test", "snapshot_retention_limit", "0"), + resourceName, "snapshot_retention_limit", "0"), ), }, { @@ -522,9 +522,9 @@ func TestAccAWSElasticacheReplicationGroup_enableSnapshotting(t *testing.T) { { Config: testAccAWSElasticacheReplicationGroupConfigEnableSnapshotting(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.test", &rg), + testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &rg), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.test", "snapshot_retention_limit", "2"), + resourceName, "snapshot_retention_limit", "2"), ), }, }, @@ -543,9 +543,9 @@ func TestAccAWSElasticacheReplicationGroup_enableAuthTokenTransitEncryption(t *t { Config: testAccAWSElasticacheReplicationGroup_EnableAuthTokenTransitEncryptionConfig(acctest.RandInt(), acctest.RandString(10), acctest.RandString(16)), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.test", &rg), + testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &rg), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.test", "transit_encryption_enabled", "true"), + resourceName, "transit_encryption_enabled", "true"), ), }, { @@ -570,9 +570,9 @@ func TestAccAWSElasticacheReplicationGroup_enableAtRestEncryption(t *testing.T) { Config: testAccAWSElasticacheReplicationGroup_EnableAtRestEncryptionConfig(acctest.RandInt(), acctest.RandString(10)), Check: resource.ComposeTestCheckFunc( - 
testAccCheckAWSElasticacheReplicationGroupExists("aws_elasticache_replication_group.test", &rg), + testAccCheckAWSElasticacheReplicationGroupExists(resourceName, &rg), resource.TestCheckResourceAttr( - "aws_elasticache_replication_group.test", "at_rest_encryption_enabled", "true"), + resourceName, "at_rest_encryption_enabled", "true"), ), }, { From a91e20abf657f1b328dfbc8e2ff87311c1ff40b0 Mon Sep 17 00:00:00 2001 From: Ryn Daniels Date: Tue, 29 Oct 2019 10:26:22 +0100 Subject: [PATCH 46/55] rename a test to avoid a lintignore --- aws/resource_aws_lightsail_key_pair_test.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/aws/resource_aws_lightsail_key_pair_test.go b/aws/resource_aws_lightsail_key_pair_test.go index 42bf4c83d0c..8796ba6eeab 100644 --- a/aws/resource_aws_lightsail_key_pair_test.go +++ b/aws/resource_aws_lightsail_key_pair_test.go @@ -36,8 +36,7 @@ func TestAccAWSLightsailKeyPair_basic(t *testing.T) { }) } -//lintignore: AT002 -func TestAccAWSLightsailKeyPair_imported(t *testing.T) { +func TestAccAWSLightsailKeyPair_publicKey(t *testing.T) { var conf lightsail.KeyPair lightsailName := fmt.Sprintf("tf-test-lightsail-%d", acctest.RandInt()) From bfad019cd7f897d10dd0de0a8d6469b4c34165ca Mon Sep 17 00:00:00 2001 From: Ryn Daniels Date: Tue, 29 Oct 2019 10:29:31 +0100 Subject: [PATCH 47/55] put the comment back --- aws/resource_aws_instance_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/aws/resource_aws_instance_test.go b/aws/resource_aws_instance_test.go index 4d1b89f503e..bd9b59384fa 100644 --- a/aws/resource_aws_instance_test.go +++ b/aws/resource_aws_instance_test.go @@ -1363,6 +1363,8 @@ func TestAccAWSInstance_associatePublicIPAndPrivateIP(t *testing.T) { }) } +// Guard against regression with KeyPairs +// https://github.com/hashicorp/terraform/issues/2302 func TestAccAWSInstance_keyPairCheck(t *testing.T) { var v ec2.Instance resourceName := "aws_instance.test" From 843ffd83e5bb826b83ddac63cd0f0db483757472 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Tue, 29 Oct 2019 14:05:08 -0400 Subject: [PATCH 48/55] provider: Add missing github.com/aws/aws-sdk-go@v1.25.21 checksum from renovate update --- go.sum | 1 + 1 file changed, 1 insertion(+) diff --git a/go.sum b/go.sum index 111429272a7..9b73d5796e8 100644 --- a/go.sum +++ b/go.sum @@ -36,6 +36,7 @@ github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgI github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= github.com/aws/aws-sdk-go v1.19.39/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.25.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.25.21 h1:ikvfTGgl09JB7LBK7V4RldG7q07SoSdFO5Kq1QZOWkM= github.com/aws/aws-sdk-go v1.25.21/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/beevik/etree v1.1.0 h1:T0xke/WvNtMoCqgzPhkX2r4rjY3GDZFi+FjpRZY2Jbs= github.com/beevik/etree v1.1.0/go.mod h1:r8Aw8JqVegEf0w2fDnATrX9VpkMcyFeM0FhwO62wh+A= From d7b0cee1de607d27fc1a776ad7cb277e0b49c39e Mon Sep 17 00:00:00 2001 From: Renovate Bot Date: Tue, 29 Oct 2019 18:54:04 +0000 Subject: [PATCH 49/55] Update module aws/aws-sdk-go to v1.25.22 --- go.mod | 2 +- go.sum | 4 +- .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../aws/aws-sdk-go/service/appstream/api.go | 211 +++++++++++++++++- .../aws/aws-sdk-go/service/cloud9/api.go | 12 +- vendor/modules.txt | 2 +- 6 files changed, 219 insertions(+), 14 deletions(-) diff --git a/go.mod 
b/go.mod index 9de5169821b..d042330034b 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.13 require ( github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0 // indirect - github.com/aws/aws-sdk-go v1.25.21 + github.com/aws/aws-sdk-go v1.25.22 github.com/beevik/etree v1.1.0 github.com/bflad/tfproviderlint v0.5.0 github.com/client9/misspell v0.3.4 diff --git a/go.sum b/go.sum index 9b73d5796e8..d00fd272003 100644 --- a/go.sum +++ b/go.sum @@ -36,8 +36,8 @@ github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgI github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= github.com/aws/aws-sdk-go v1.19.39/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.25.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.25.21 h1:ikvfTGgl09JB7LBK7V4RldG7q07SoSdFO5Kq1QZOWkM= -github.com/aws/aws-sdk-go v1.25.21/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.25.22 h1:DXcA0jjMnGt2awoWM2qwCu+ouGDB5FYnGxCVrRweE/0= +github.com/aws/aws-sdk-go v1.25.22/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/beevik/etree v1.1.0 h1:T0xke/WvNtMoCqgzPhkX2r4rjY3GDZFi+FjpRZY2Jbs= github.com/beevik/etree v1.1.0/go.mod h1:r8Aw8JqVegEf0w2fDnATrX9VpkMcyFeM0FhwO62wh+A= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 633784fe143..8d4da6459ad 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.25.21" +const SDKVersion = "1.25.22" diff --git a/vendor/github.com/aws/aws-sdk-go/service/appstream/api.go b/vendor/github.com/aws/aws-sdk-go/service/appstream/api.go index 8a325ba01ba..a04d00dd34e 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/appstream/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/appstream/api.go @@ -5014,7 +5014,13 @@ type CreateFleetInput struct { // The Amazon Resource Name (ARN) of the IAM role to apply to the fleet. To // assume a role, a fleet instance calls the AWS Security Token Service (STS) // AssumeRole API operation and passes the ARN of the role to use. The operation - // creates a new session with temporary credentials. + // creates a new session with temporary credentials. AppStream 2.0 retrieves + // the temporary credentials and creates the AppStream_Machine_Role credential + // profile on the instance. + // + // For more information, see Using an IAM Role to Grant Permissions to Applications + // and Scripts Running on AppStream 2.0 Streaming Instances (https://docs.aws.amazon.com/appstream2/latest/developerguide/using-iam-roles-to-grant-permissions-to-applications-scripts-streaming-instances.html) + // in the Amazon AppStream 2.0 Administration Guide. IamRoleArn *string `type:"string"` // The amount of time that users can be idle (inactive) before they are disconnected @@ -5315,7 +5321,13 @@ type CreateImageBuilderInput struct { // The Amazon Resource Name (ARN) of the IAM role to apply to the image builder. // To assume a role, the image builder calls the AWS Security Token Service // (STS) AssumeRole API operation and passes the ARN of the role to use. 
The - // operation creates a new session with temporary credentials. + // operation creates a new session with temporary credentials. AppStream 2.0 + // retrieves the temporary credentials and creates the AppStream_Machine_Role + // credential profile on the instance. + // + // For more information, see Using an IAM Role to Grant Permissions to Applications + // and Scripts Running on AppStream 2.0 Streaming Instances (https://docs.aws.amazon.com/appstream2/latest/developerguide/using-iam-roles-to-grant-permissions-to-applications-scripts-streaming-instances.html) + // in the Amazon AppStream 2.0 Administration Guide. IamRoleArn *string `type:"string"` // The ARN of the public, private, or shared image to use. @@ -5324,7 +5336,48 @@ type CreateImageBuilderInput struct { // The name of the image used to create the image builder. ImageName *string `min:"1" type:"string"` - // The instance type to use when launching the image builder. + // The instance type to use when launching the image builder. The following + // instance types are available: + // + // * stream.standard.medium + // + // * stream.standard.large + // + // * stream.compute.large + // + // * stream.compute.xlarge + // + // * stream.compute.2xlarge + // + // * stream.compute.4xlarge + // + // * stream.compute.8xlarge + // + // * stream.memory.large + // + // * stream.memory.xlarge + // + // * stream.memory.2xlarge + // + // * stream.memory.4xlarge + // + // * stream.memory.8xlarge + // + // * stream.graphics-design.large + // + // * stream.graphics-design.xlarge + // + // * stream.graphics-design.2xlarge + // + // * stream.graphics-design.4xlarge + // + // * stream.graphics-desktop.2xlarge + // + // * stream.graphics-pro.4xlarge + // + // * stream.graphics-pro.8xlarge + // + // * stream.graphics-pro.16xlarge // // InstanceType is a required field InstanceType *string `min:"1" type:"string" required:"true"` @@ -5606,6 +5659,11 @@ type CreateStackInput struct { // The stack name to display. DisplayName *string `type:"string"` + // The domains where AppStream 2.0 streaming sessions can be embedded in an + // iframe. You must approve the domains that you want to host embedded AppStream + // 2.0 streaming sessions. + EmbedHostDomains []*string `min:"1" type:"list"` + // The URL that users are redirected to after they click the Send Feedback link. // If no URL is specified, no Send Feedback link is displayed. FeedbackURL *string `type:"string"` @@ -5657,6 +5715,9 @@ func (s *CreateStackInput) Validate() error { if s.AccessEndpoints != nil && len(s.AccessEndpoints) < 1 { invalidParams.Add(request.NewErrParamMinLen("AccessEndpoints", 1)) } + if s.EmbedHostDomains != nil && len(s.EmbedHostDomains) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EmbedHostDomains", 1)) + } if s.Name == nil { invalidParams.Add(request.NewErrParamRequired("Name")) } @@ -5732,6 +5793,12 @@ func (s *CreateStackInput) SetDisplayName(v string) *CreateStackInput { return s } +// SetEmbedHostDomains sets the EmbedHostDomains field's value. +func (s *CreateStackInput) SetEmbedHostDomains(v []*string) *CreateStackInput { + s.EmbedHostDomains = v + return s +} + // SetFeedbackURL sets the FeedbackURL field's value. func (s *CreateStackInput) SetFeedbackURL(v string) *CreateStackInput { s.FeedbackURL = &v @@ -7947,7 +8014,13 @@ type Fleet struct { // The ARN of the IAM role that is applied to the fleet. To assume a role, the // fleet instance calls the AWS Security Token Service (STS) AssumeRole API // operation and passes the ARN of the role to use. 
The operation creates a - // new session with temporary credentials. + // new session with temporary credentials. AppStream 2.0 retrieves the temporary + // credentials and creates the AppStream_Machine_Role credential profile on + // the instance. + // + // For more information, see Using an IAM Role to Grant Permissions to Applications + // and Scripts Running on AppStream 2.0 Streaming Instances (https://docs.aws.amazon.com/appstream2/latest/developerguide/using-iam-roles-to-grant-permissions-to-applications-scripts-streaming-instances.html) + // in the Amazon AppStream 2.0 Administration Guide. IamRoleArn *string `type:"string"` // The amount of time that users can be idle (inactive) before they are disconnected @@ -7980,7 +8053,48 @@ type Fleet struct { // The name of the image used to create the fleet. ImageName *string `min:"1" type:"string"` - // The instance type to use when launching fleet instances. + // The instance type to use when launching fleet instances. The following instance + // types are available: + // + // * stream.standard.medium + // + // * stream.standard.large + // + // * stream.compute.large + // + // * stream.compute.xlarge + // + // * stream.compute.2xlarge + // + // * stream.compute.4xlarge + // + // * stream.compute.8xlarge + // + // * stream.memory.large + // + // * stream.memory.xlarge + // + // * stream.memory.2xlarge + // + // * stream.memory.4xlarge + // + // * stream.memory.8xlarge + // + // * stream.graphics-design.large + // + // * stream.graphics-design.xlarge + // + // * stream.graphics-design.2xlarge + // + // * stream.graphics-design.4xlarge + // + // * stream.graphics-desktop.2xlarge + // + // * stream.graphics-pro.4xlarge + // + // * stream.graphics-pro.8xlarge + // + // * stream.graphics-pro.16xlarge // // InstanceType is a required field InstanceType *string `min:"1" type:"string" required:"true"` @@ -8365,7 +8479,13 @@ type ImageBuilder struct { // The ARN of the IAM role that is applied to the image builder. To assume a // role, the image builder calls the AWS Security Token Service (STS) AssumeRole // API operation and passes the ARN of the role to use. The operation creates - // a new session with temporary credentials. + // a new session with temporary credentials. AppStream 2.0 retrieves the temporary + // credentials and creates the AppStream_Machine_Role credential profile on + // the instance. + // + // For more information, see Using an IAM Role to Grant Permissions to Applications + // and Scripts Running on AppStream 2.0 Streaming Instances (https://docs.aws.amazon.com/appstream2/latest/developerguide/using-iam-roles-to-grant-permissions-to-applications-scripts-streaming-instances.html) + // in the Amazon AppStream 2.0 Administration Guide. IamRoleArn *string `type:"string"` // The ARN of the image from which this builder was created. @@ -8374,7 +8494,48 @@ type ImageBuilder struct { // The image builder errors. ImageBuilderErrors []*ResourceError `type:"list"` - // The instance type for the image builder. + // The instance type for the image builder. 
The following instance types are + // available: + // + // * stream.standard.medium + // + // * stream.standard.large + // + // * stream.compute.large + // + // * stream.compute.xlarge + // + // * stream.compute.2xlarge + // + // * stream.compute.4xlarge + // + // * stream.compute.8xlarge + // + // * stream.memory.large + // + // * stream.memory.xlarge + // + // * stream.memory.2xlarge + // + // * stream.memory.4xlarge + // + // * stream.memory.8xlarge + // + // * stream.graphics-design.large + // + // * stream.graphics-design.xlarge + // + // * stream.graphics-design.2xlarge + // + // * stream.graphics-design.4xlarge + // + // * stream.graphics-desktop.2xlarge + // + // * stream.graphics-pro.4xlarge + // + // * stream.graphics-pro.8xlarge + // + // * stream.graphics-pro.16xlarge InstanceType *string `min:"1" type:"string"` // The name of the image builder. @@ -9208,6 +9369,11 @@ type Stack struct { // The stack name to display. DisplayName *string `min:"1" type:"string"` + // The domains where AppStream 2.0 streaming sessions can be embedded in an + // iframe. You must approve the domains that you want to host embedded AppStream + // 2.0 streaming sessions. + EmbedHostDomains []*string `min:"1" type:"list"` + // The URL that users are redirected to after they click the Send Feedback link. // If no URL is specified, no Send Feedback link is displayed. FeedbackURL *string `type:"string"` @@ -9277,6 +9443,12 @@ func (s *Stack) SetDisplayName(v string) *Stack { return s } +// SetEmbedHostDomains sets the EmbedHostDomains field's value. +func (s *Stack) SetEmbedHostDomains(v []*string) *Stack { + s.EmbedHostDomains = v + return s +} + // SetFeedbackURL sets the FeedbackURL field's value. func (s *Stack) SetFeedbackURL(v string) *Stack { s.FeedbackURL = &v @@ -9927,7 +10099,13 @@ type UpdateFleetInput struct { // The Amazon Resource Name (ARN) of the IAM role to apply to the fleet. To // assume a role, a fleet instance calls the AWS Security Token Service (STS) // AssumeRole API operation and passes the ARN of the role to use. The operation - // creates a new session with temporary credentials. + // creates a new session with temporary credentials. AppStream 2.0 retrieves + // the temporary credentials and creates the AppStream_Machine_Role credential + // profile on the instance. + // + // For more information, see Using an IAM Role to Grant Permissions to Applications + // and Scripts Running on AppStream 2.0 Streaming Instances (https://docs.aws.amazon.com/appstream2/latest/developerguide/using-iam-roles-to-grant-permissions-to-applications-scripts-streaming-instances.html) + // in the Amazon AppStream 2.0 Administration Guide. IamRoleArn *string `type:"string"` // The amount of time that users can be idle (inactive) before they are disconnected @@ -10280,6 +10458,11 @@ type UpdateStackInput struct { // The stack name to display. DisplayName *string `type:"string"` + // The domains where AppStream 2.0 streaming sessions can be embedded in an + // iframe. You must approve the domains that you want to host embedded AppStream + // 2.0 streaming sessions. + EmbedHostDomains []*string `min:"1" type:"list"` + // The URL that users are redirected to after they choose the Send Feedback // link. If no URL is specified, no Send Feedback link is displayed. 
FeedbackURL *string `type:"string"` @@ -10316,6 +10499,9 @@ func (s *UpdateStackInput) Validate() error { if s.AccessEndpoints != nil && len(s.AccessEndpoints) < 1 { invalidParams.Add(request.NewErrParamMinLen("AccessEndpoints", 1)) } + if s.EmbedHostDomains != nil && len(s.EmbedHostDomains) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EmbedHostDomains", 1)) + } if s.Name == nil { invalidParams.Add(request.NewErrParamRequired("Name")) } @@ -10403,6 +10589,12 @@ func (s *UpdateStackInput) SetDisplayName(v string) *UpdateStackInput { return s } +// SetEmbedHostDomains sets the EmbedHostDomains field's value. +func (s *UpdateStackInput) SetEmbedHostDomains(v []*string) *UpdateStackInput { + s.EmbedHostDomains = v + return s +} + // SetFeedbackURL sets the FeedbackURL field's value. func (s *UpdateStackInput) SetFeedbackURL(v string) *UpdateStackInput { s.FeedbackURL = &v @@ -11128,6 +11320,9 @@ const ( // StackAttributeUserSettings is a StackAttribute enum value StackAttributeUserSettings = "USER_SETTINGS" + // StackAttributeEmbedHostDomains is a StackAttribute enum value + StackAttributeEmbedHostDomains = "EMBED_HOST_DOMAINS" + // StackAttributeIamRoleArn is a StackAttribute enum value StackAttributeIamRoleArn = "IAM_ROLE_ARN" diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloud9/api.go b/vendor/github.com/aws/aws-sdk-go/service/cloud9/api.go index 0710f084449..e6eda869ff2 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloud9/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloud9/api.go @@ -1810,11 +1810,15 @@ type EnvironmentLifecycle struct { // The current creation or deletion lifecycle state of the environment. // + // * CREATING: The environment is in the process of being created. + // // * CREATED: The environment was successfully created. // - // * DELETE_FAILED: The environment failed to delete. + // * CREATE_FAILED: The environment failed to be created. // // * DELETING: The environment is in the process of being deleted. + // + // * DELETE_FAILED: The environment failed to delete. 
Status *string `locationName:"status" type:"string" enum:"EnvironmentLifecycleStatus"` } @@ -2157,9 +2161,15 @@ func (s UpdateEnvironmentOutput) GoString() string { } const ( + // EnvironmentLifecycleStatusCreating is a EnvironmentLifecycleStatus enum value + EnvironmentLifecycleStatusCreating = "CREATING" + // EnvironmentLifecycleStatusCreated is a EnvironmentLifecycleStatus enum value EnvironmentLifecycleStatusCreated = "CREATED" + // EnvironmentLifecycleStatusCreateFailed is a EnvironmentLifecycleStatus enum value + EnvironmentLifecycleStatusCreateFailed = "CREATE_FAILED" + // EnvironmentLifecycleStatusDeleting is a EnvironmentLifecycleStatus enum value EnvironmentLifecycleStatusDeleting = "DELETING" diff --git a/vendor/modules.txt b/vendor/modules.txt index 25315a1fd39..76ce9ce1c3e 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -21,7 +21,7 @@ github.com/apparentlymart/go-cidr/cidr github.com/apparentlymart/go-textseg/textseg # github.com/armon/go-radix v1.0.0 github.com/armon/go-radix -# github.com/aws/aws-sdk-go v1.25.21 +# github.com/aws/aws-sdk-go v1.25.22 github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/arn github.com/aws/aws-sdk-go/aws/awserr From 988a6afdacf9c16787ca1ace1e75fe9f726f59af Mon Sep 17 00:00:00 2001 From: Barak Ben-David Date: Tue, 29 Oct 2019 12:49:56 -0700 Subject: [PATCH 50/55] docs: Clean up data source list syntax (#10223) --- website/docs/d/subnet_ids.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/d/subnet_ids.html.markdown b/website/docs/d/subnet_ids.html.markdown index 32f53f35700..7aab11df7be 100644 --- a/website/docs/d/subnet_ids.html.markdown +++ b/website/docs/d/subnet_ids.html.markdown @@ -26,7 +26,7 @@ data "aws_subnet" "example" { } output "subnet_cidr_blocks" { - value = ["${data.aws_subnet.example.*.cidr_block}"] + value = "${data.aws_subnet.example.*.cidr_block}" } ``` From c64f19eebf6e1c5d31374b02def9226b2945f52c Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Tue, 29 Oct 2019 18:05:39 -0400 Subject: [PATCH 51/55] Update CHANGELOG for #10657 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6e0ac4f0dec..d136b9e2a6c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ BUG FIXES: * resource/aws_cloudhsm_v2_cluster: Ensure multiple tag configurations are applied correctly [GH-10309] * resource/aws_cloudhsm_v2_cluster: Perform drift detection with tags [GH-10309] * resource/aws_dx_gateway_association: Fix backwards compatibility issue with missing `dx_gateway_association_id` attribute [GH-8776] +* resource/aws_s3_bucket: Bypass `MethodNotAllowed` errors for Object Lock Configuration on read (support AWS C2S) [GH-10657] ## 2.33.0 (October 17, 2019) From 863148930bd40a95cd5bfd33b370937ce9266665 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Tue, 29 Oct 2019 20:46:41 -0400 Subject: [PATCH 52/55] resource/aws_glue_job: Address PR #10237 feedback Reference: https://github.com/terraform-providers/terraform-provider-aws/pull/10237#pullrequestreview-300963135 Output from acceptance testing in AWS Commercial: ``` --- PASS: TestAccAWSGlueJob_AllocatedCapacity (52.40s) --- PASS: TestAccAWSGlueJob_Timeout (52.61s) --- PASS: TestAccAWSGlueJob_ExecutionProperty (57.79s) --- PASS: TestAccAWSGlueJob_Description (61.93s) --- PASS: TestAccAWSGlueJob_Command (62.62s) --- PASS: TestAccAWSGlueJob_Basic (72.67s) --- PASS: TestAccAWSGlueJob_DefaultArguments (74.90s) --- PASS: TestAccAWSGlueJob_MaxRetries (81.59s) --- 
PASS: TestAccAWSGlueJob_GlueVersion (88.02s) --- PASS: TestAccAWSGlueJob_SecurityConfiguration (99.31s) --- PASS: TestAccAWSGlueJob_PythonShell (105.36s) --- PASS: TestAccAWSGlueJob_MaxCapacity (124.59s) ``` Output from acceptance testing in AWS GovCloud (US) (new test failure acceptable in this partition): ``` --- FAIL: TestAccAWSGlueJob_GlueVersion (34.67s) testing.go:615: Step 0 error: Check failed: Check 2/2 error: aws_glue_job.test: Attribute 'glue_version' expected "0.9", got "" --- PASS: TestAccAWSGlueJob_Command (51.17s) --- PASS: TestAccAWSGlueJob_DefaultArguments (67.58s) --- PASS: TestAccAWSGlueJob_MaxRetries (68.45s) --- PASS: TestAccAWSGlueJob_SecurityConfiguration (75.77s) --- PASS: TestAccAWSGlueJob_PythonShell (81.86s) --- PASS: TestAccAWSGlueJob_Description (86.03s) --- PASS: TestAccAWSGlueJob_MaxCapacity (95.51s) --- PASS: TestAccAWSGlueJob_Timeout (103.41s) --- PASS: TestAccAWSGlueJob_Basic (120.65s) --- PASS: TestAccAWSGlueJob_ExecutionProperty (134.29s) --- PASS: TestAccAWSGlueJob_AllocatedCapacity (143.32s) ``` --- aws/resource_aws_glue_job.go | 1 + website/docs/r/glue_job.html.markdown | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/aws/resource_aws_glue_job.go b/aws/resource_aws_glue_job.go index 3d91410132c..285be72f5fa 100644 --- a/aws/resource_aws_glue_job.go +++ b/aws/resource_aws_glue_job.go @@ -69,6 +69,7 @@ func resourceAwsGlueJob() *schema.Resource { "glue_version": { Type: schema.TypeString, Optional: true, + Computed: true, }, "execution_property": { Type: schema.TypeList, diff --git a/website/docs/r/glue_job.html.markdown b/website/docs/r/glue_job.html.markdown index 3677b9e9a50..43b1a8a96c4 100644 --- a/website/docs/r/glue_job.html.markdown +++ b/website/docs/r/glue_job.html.markdown @@ -54,14 +54,14 @@ be removed in future releases, please use `max_capacity` instead. * `default_arguments` – (Optional) The map of default arguments for this job. You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes. For information about how to specify and consume your own Job arguments, see the [Calling AWS Glue APIs in Python](http://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html) topic in the developer guide. For information about the key-value pairs that AWS Glue consumes to set up your job, see the [Special Parameters Used by AWS Glue](http://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-glue-arguments.html) topic in the developer guide. * `description` – (Optional) Description of the job. * `execution_property` – (Optional) Execution property of the job. Defined below. +* `glue_version` - (Optional) The version of glue to use, for example "1.0". For information about available versions, see the [AWS Glue Release Notes](https://docs.aws.amazon.com/glue/latest/dg/release-notes.html). * `max_capacity` – (Optional) The maximum number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. * `max_retries` – (Optional) The maximum number of times to retry this job if it fails. * `name` – (Required) The name you assign to this job. It must be unique in your account. * `role_arn` – (Required) The ARN of the IAM role associated with this job. * `timeout` – (Optional) The job timeout in minutes. The default is 2880 minutes (48 hours). * `security_configuration` - (Optional) The name of the Security Configuration to be associated with the job. 
-* `glue_version` - (Optional) The version of glue to use, for example "1.0". For information about available versionse see [AWS Glue Release Notes](https://docs.aws.amazon.com/glue/latest/dg/release-notes.html). - + ### command Argument Reference * `name` - (Optional) The name of the job command. Defaults to `glueetl` From a90ca3f9b12514b510f7a97b4cd188c2a749e142 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Tue, 29 Oct 2019 20:50:51 -0400 Subject: [PATCH 53/55] Update CHANGELOG for #10237 --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d136b9e2a6c..79ece573a07 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,9 @@ ## 2.34.0 (Unreleased) +ENHANCEMENTS: + +* resource/aws_glue_job: Add `glue_version` argument [GH-10237] + BUG FIXES: * resource/aws_backup_plan: Correctly handle changes to `recovery_point_tags` arguments [GH-10641] From f1a57ec070cffc6f30f03333d257c7bcb938b715 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Wed, 30 Oct 2019 09:55:15 -0400 Subject: [PATCH 54/55] Update CHANGELOG for #10620 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 79ece573a07..f147038c2d5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ ENHANCEMENTS: * resource/aws_glue_job: Add `glue_version` argument [GH-10237] +* resource/aws_storagegateway_smb_file_share: Add `tags` argument [GH-10620] BUG FIXES: From 7699f5e10de0b21cd675025293e53f51a3e30191 Mon Sep 17 00:00:00 2001 From: Brian Flad Date: Wed, 30 Oct 2019 10:01:16 -0400 Subject: [PATCH 55/55] Update CHANGELOG for #10380 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f147038c2d5..b692357da9f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,7 @@ ENHANCEMENTS: +* resource/aws_elasticache_replication_group: Add `kms_key_id` argument (support KMS encryption) [GH-10380] * resource/aws_glue_job: Add `glue_version` argument [GH-10237] * resource/aws_storagegateway_smb_file_share: Add `tags` argument [GH-10620]
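
For readers scanning the CHANGELOG entries above, the `glue_version` argument introduced in this series is set directly on the `aws_glue_job` resource. The following is a minimal illustrative sketch only, not taken from this changeset; the job name, IAM role reference, and script location are placeholders:

```hcl
# Minimal Glue job pinned to a specific Glue version.
resource "aws_glue_job" "example" {
  name         = "example-job"                 # placeholder name
  role_arn     = "${aws_iam_role.example.arn}" # assumes an IAM role defined elsewhere
  glue_version = "1.0"                         # new optional argument; "0.9" is also accepted

  command {
    script_location = "s3://example-bucket/scripts/example.py" # placeholder S3 path
  }
}
```

Because the attribute is also marked `Computed` in the follow-up commit above, omitting `glue_version` simply lets the provider store whatever default version the Glue service reports back, without producing a spurious diff.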