diff --git a/CHANGELOG.md b/CHANGELOG.md index 47f77524e25..b692357da9f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,9 +1,19 @@ ## 2.34.0 (Unreleased) +ENHANCEMENTS: + +* resource/aws_elasticache_replication_group: Add `kms_key_id` argument (support KMS encryption) [GH-10380] +* resource/aws_glue_job: Add `glue_version` argument [GH-10237] +* resource/aws_storagegateway_smb_file_share: Add `tags` argument [GH-10620] + BUG FIXES: +* resource/aws_backup_plan: Correctly handle changes to `recovery_point_tags` arguments [GH-10641] +* resource/aws_backup_plan: Prevent `diffs didn't match` errors with `rule` configuration blocks [GH-10641] * resource/aws_cloudhsm_v2_cluster: Ensure multiple tag configurations are applied correctly [GH-10309] * resource/aws_cloudhsm_v2_cluster: Perform drift detection with tags [GH-10309] +* resource/aws_dx_gateway_association: Fix backwards compatibility issue with missing `dx_gateway_association_id` attribute [GH-8776] +* resource/aws_s3_bucket: Bypass `MethodNotAllowed` errors for Object Lock Configuration on read (support AWS C2S) [GH-10657] ## 2.33.0 (October 17, 2019) diff --git a/aws/resource_aws_acm_certificate_test.go b/aws/resource_aws_acm_certificate_test.go index afbd13349bc..694d02c30ec 100644 --- a/aws/resource_aws_acm_certificate_test.go +++ b/aws/resource_aws_acm_certificate_test.go @@ -525,6 +525,7 @@ func TestAccAWSAcmCertificate_tags(t *testing.T) { }) } +//lintignore:AT002 func TestAccAWSAcmCertificate_imported_DomainName(t *testing.T) { resourceName := "aws_acm_certificate.test" @@ -558,8 +559,8 @@ func TestAccAWSAcmCertificate_imported_DomainName(t *testing.T) { }) } -// Reference: https://github.com/terraform-providers/terraform-provider-aws/issues/7103 -func TestAccAWSAcmCertificate_imported_IpAddress(t *testing.T) { +//lintignore:AT002 +func TestAccAWSAcmCertificate_imported_IpAddress(t *testing.T) { // Reference: https://github.com/terraform-providers/terraform-provider-aws/issues/7103 resourceName := 
"aws_acm_certificate.test" resource.ParallelTest(t, resource.TestCase{ diff --git a/aws/resource_aws_api_gateway_account_test.go b/aws/resource_aws_api_gateway_account_test.go index 8fd6d1edbae..6f10529d25b 100644 --- a/aws/resource_aws_api_gateway_account_test.go +++ b/aws/resource_aws_api_gateway_account_test.go @@ -11,34 +11,13 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/terraform" ) -func TestAccAWSAPIGatewayAccount_importBasic(t *testing.T) { - resourceName := "aws_api_gateway_account.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAPIGatewayAccountDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSAPIGatewayAccountConfig_empty, - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - func TestAccAWSAPIGatewayAccount_basic(t *testing.T) { var conf apigateway.Account rInt := acctest.RandInt() firstName := fmt.Sprintf("tf_acc_api_gateway_cloudwatch_%d", rInt) secondName := fmt.Sprintf("tf_acc_api_gateway_cloudwatch_modified_%d", rInt) - + resourceName := "aws_api_gateway_account.test" expectedRoleArn_first := regexp.MustCompile(":role/" + firstName + "$") expectedRoleArn_second := regexp.MustCompile(":role/" + secondName + "$") @@ -50,23 +29,29 @@ func TestAccAWSAPIGatewayAccount_basic(t *testing.T) { { Config: testAccAWSAPIGatewayAccountConfig_updated(firstName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayAccountExists("aws_api_gateway_account.test", &conf), + testAccCheckAWSAPIGatewayAccountExists(resourceName, &conf), testAccCheckAWSAPIGatewayAccountCloudwatchRoleArn(&conf, expectedRoleArn_first), - resource.TestMatchResourceAttr("aws_api_gateway_account.test", "cloudwatch_role_arn", expectedRoleArn_first), + resource.TestMatchResourceAttr(resourceName, "cloudwatch_role_arn", expectedRoleArn_first), ), }, + { + ResourceName: resourceName, + 
ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"cloudwatch_role_arn"}, + }, { Config: testAccAWSAPIGatewayAccountConfig_updated2(secondName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayAccountExists("aws_api_gateway_account.test", &conf), + testAccCheckAWSAPIGatewayAccountExists(resourceName, &conf), testAccCheckAWSAPIGatewayAccountCloudwatchRoleArn(&conf, expectedRoleArn_second), - resource.TestMatchResourceAttr("aws_api_gateway_account.test", "cloudwatch_role_arn", expectedRoleArn_second), + resource.TestMatchResourceAttr(resourceName, "cloudwatch_role_arn", expectedRoleArn_second), ), }, { Config: testAccAWSAPIGatewayAccountConfig_empty, Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayAccountExists("aws_api_gateway_account.test", &conf), + testAccCheckAWSAPIGatewayAccountExists(resourceName, &conf), testAccCheckAWSAPIGatewayAccountCloudwatchRoleArn(&conf, expectedRoleArn_second), ), }, diff --git a/aws/resource_aws_api_gateway_client_certificate_test.go b/aws/resource_aws_api_gateway_client_certificate_test.go index 111dd4ccfaa..418ff1a2e62 100644 --- a/aws/resource_aws_api_gateway_client_certificate_test.go +++ b/aws/resource_aws_api_gateway_client_certificate_test.go @@ -13,6 +13,7 @@ import ( func TestAccAWSAPIGatewayClientCertificate_basic(t *testing.T) { var conf apigateway.ClientCertificate + resourceName := "aws_api_gateway_client_certificate.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -22,38 +23,22 @@ func TestAccAWSAPIGatewayClientCertificate_basic(t *testing.T) { { Config: testAccAWSAPIGatewayClientCertificateConfig_basic, Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayClientCertificateExists("aws_api_gateway_client_certificate.cow", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_client_certificate.cow", "description", "Hello from TF acceptance test"), + 
testAccCheckAWSAPIGatewayClientCertificateExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "description", "Hello from TF acceptance test"), ), }, - { - Config: testAccAWSAPIGatewayClientCertificateConfig_basic_updated, - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayClientCertificateExists("aws_api_gateway_client_certificate.cow", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_client_certificate.cow", "description", "Hello from TF acceptance test - updated"), - ), - }, - }, - }) -} - -func TestAccAWSAPIGatewayClientCertificate_importBasic(t *testing.T) { - resourceName := "aws_api_gateway_client_certificate.cow" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAPIGatewayClientCertificateDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSAPIGatewayClientCertificateConfig_basic, - }, - { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, }, + { + Config: testAccAWSAPIGatewayClientCertificateConfig_basic_updated, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAPIGatewayClientCertificateExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "description", "Hello from TF acceptance test - updated"), + ), + }, }, }) } @@ -116,13 +101,13 @@ func testAccCheckAWSAPIGatewayClientCertificateDestroy(s *terraform.State) error } const testAccAWSAPIGatewayClientCertificateConfig_basic = ` -resource "aws_api_gateway_client_certificate" "cow" { +resource "aws_api_gateway_client_certificate" "test" { description = "Hello from TF acceptance test" } ` const testAccAWSAPIGatewayClientCertificateConfig_basic_updated = ` -resource "aws_api_gateway_client_certificate" "cow" { +resource "aws_api_gateway_client_certificate" "test" { description = "Hello from TF acceptance test - updated" } ` diff --git a/aws/resource_aws_api_gateway_documentation_part_test.go 
b/aws/resource_aws_api_gateway_documentation_part_test.go index b0df32e5cd1..5b86b42ed86 100644 --- a/aws/resource_aws_api_gateway_documentation_part_test.go +++ b/aws/resource_aws_api_gateway_documentation_part_test.go @@ -37,6 +37,11 @@ func TestAccAWSAPIGatewayDocumentationPart_basic(t *testing.T) { resource.TestCheckResourceAttrSet(resourceName, "rest_api_id"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSAPIGatewayDocumentationPartConfig(apiName, strconv.Quote(uProperties)), Check: resource.ComposeTestCheckFunc( @@ -78,6 +83,11 @@ func TestAccAWSAPIGatewayDocumentationPart_method(t *testing.T) { resource.TestCheckResourceAttrSet(resourceName, "rest_api_id"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSAPIGatewayDocumentationPartMethodConfig(apiName, strconv.Quote(uProperties)), Check: resource.ComposeTestCheckFunc( @@ -123,6 +133,11 @@ func TestAccAWSAPIGatewayDocumentationPart_responseHeader(t *testing.T) { resource.TestCheckResourceAttrSet(resourceName, "rest_api_id"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSAPIGatewayDocumentationPartResponseHeaderConfig(apiName, strconv.Quote(uProperties)), Check: resource.ComposeTestCheckFunc( @@ -141,39 +156,6 @@ func TestAccAWSAPIGatewayDocumentationPart_responseHeader(t *testing.T) { }) } -func TestAccAWSAPIGatewayDocumentationPart_importBasic(t *testing.T) { - var conf apigateway.DocumentationPart - - rString := acctest.RandString(8) - apiName := fmt.Sprintf("tf_acc_api_doc_part_import_%s", rString) - properties := `{"description":"Terraform Acceptance Test"}` - - resourceName := "aws_api_gateway_documentation_part.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAPIGatewayDocumentationPartDestroy, - 
Steps: []resource.TestStep{ - { - Config: testAccAWSAPIGatewayDocumentationPartConfig(apiName, strconv.Quote(properties)), - Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayDocumentationPartExists(resourceName, &conf), - resource.TestCheckResourceAttr(resourceName, "location.#", "1"), - resource.TestCheckResourceAttr(resourceName, "location.0.type", "API"), - resource.TestCheckResourceAttr(resourceName, "properties", properties), - resource.TestCheckResourceAttrSet(resourceName, "rest_api_id"), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - func testAccCheckAWSAPIGatewayDocumentationPartExists(n string, res *apigateway.DocumentationPart) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] diff --git a/aws/resource_aws_api_gateway_documentation_version_test.go b/aws/resource_aws_api_gateway_documentation_version_test.go index c5f1c44e84f..b28788107e8 100644 --- a/aws/resource_aws_api_gateway_documentation_version_test.go +++ b/aws/resource_aws_api_gateway_documentation_version_test.go @@ -33,6 +33,11 @@ func TestAccAWSAPIGatewayDocumentationVersion_basic(t *testing.T) { resource.TestCheckResourceAttrSet(resourceName, "rest_api_id"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -63,6 +68,11 @@ func TestAccAWSAPIGatewayDocumentationVersion_allFields(t *testing.T) { resource.TestCheckResourceAttrSet(resourceName, "rest_api_id"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSAPIGatewayDocumentationVersionAllFieldsConfig(version, apiName, stageName, uDescription), Check: resource.ComposeTestCheckFunc( @@ -76,56 +86,6 @@ func TestAccAWSAPIGatewayDocumentationVersion_allFields(t *testing.T) { }) } -func TestAccAWSAPIGatewayDocumentationVersion_importBasic(t *testing.T) { - rString := acctest.RandString(8) - version := 
fmt.Sprintf("tf_acc_version_import_%s", rString) - apiName := fmt.Sprintf("tf_acc_api_doc_version_import_%s", rString) - - resourceName := "aws_api_gateway_documentation_version.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAPIGatewayDocumentationVersionDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSAPIGatewayDocumentationVersionBasicConfig(version, apiName), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccAWSAPIGatewayDocumentationVersion_importAllFields(t *testing.T) { - rString := acctest.RandString(8) - version := fmt.Sprintf("tf_acc_version_import_af_%s", rString) - apiName := fmt.Sprintf("tf_acc_api_doc_version_import_af_%s", rString) - stageName := fmt.Sprintf("tf_acc_stage_%s", rString) - description := fmt.Sprintf("Tf Acc Test description %s", rString) - - resourceName := "aws_api_gateway_documentation_version.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAPIGatewayDocumentationVersionDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSAPIGatewayDocumentationVersionAllFieldsConfig(version, apiName, stageName, description), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - func testAccCheckAWSAPIGatewayDocumentationVersionExists(n string, res *apigateway.DocumentationVersion) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] diff --git a/aws/resource_aws_api_gateway_usage_plan_test.go b/aws/resource_aws_api_gateway_usage_plan_test.go index 1183edfaf14..47036795eb7 100644 --- a/aws/resource_aws_api_gateway_usage_plan_test.go +++ b/aws/resource_aws_api_gateway_usage_plan_test.go @@ -12,32 +12,11 @@ import ( 
"github.com/hashicorp/terraform-plugin-sdk/terraform" ) -func TestAccAWSAPIGatewayUsagePlan_importBasic(t *testing.T) { - resourceName := "aws_api_gateway_usage_plan.main" - rName := acctest.RandString(10) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAPIGatewayUsagePlanDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSApiGatewayUsagePlanBasicConfig(rName), - }, - - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - func TestAccAWSAPIGatewayUsagePlan_basic(t *testing.T) { var conf apigateway.UsagePlan name := acctest.RandString(10) updatedName := acctest.RandString(10) + resourceName := "aws_api_gateway_usage_plan.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -47,24 +26,29 @@ func TestAccAWSAPIGatewayUsagePlan_basic(t *testing.T) { { Config: testAccAWSApiGatewayUsagePlanBasicConfig(name), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "description", ""), + testAccCheckAWSAPIGatewayUsagePlanExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "name", name), + resource.TestCheckResourceAttr(resourceName, "description", ""), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSApiGatewayUsagePlanBasicUpdatedConfig(updatedName), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", updatedName), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "description", ""), + resource.TestCheckResourceAttr(resourceName, "name", updatedName), + 
resource.TestCheckResourceAttr(resourceName, "description", ""), ), }, { Config: testAccAWSApiGatewayUsagePlanBasicConfig(name), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "description", ""), + testAccCheckAWSAPIGatewayUsagePlanExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "name", name), + resource.TestCheckResourceAttr(resourceName, "description", ""), ), }, }, @@ -74,6 +58,7 @@ func TestAccAWSAPIGatewayUsagePlan_basic(t *testing.T) { func TestAccAWSAPIGatewayUsagePlan_description(t *testing.T) { var conf apigateway.UsagePlan name := acctest.RandString(10) + resourceName := "aws_api_gateway_usage_plan.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -83,35 +68,40 @@ func TestAccAWSAPIGatewayUsagePlan_description(t *testing.T) { { Config: testAccAWSApiGatewayUsagePlanBasicConfig(name), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "description", ""), + testAccCheckAWSAPIGatewayUsagePlanExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "description", ""), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSApiGatewayUsagePlanDescriptionConfig(name), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "description", "This is a description"), + testAccCheckAWSAPIGatewayUsagePlanExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "description", "This is a description"), ), }, { 
Config: testAccAWSApiGatewayUsagePlanDescriptionUpdatedConfig(name), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "description", "This is a new description"), + resource.TestCheckResourceAttr(resourceName, "description", "This is a new description"), ), }, { Config: testAccAWSApiGatewayUsagePlanDescriptionConfig(name), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "description", "This is a description"), + testAccCheckAWSAPIGatewayUsagePlanExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "description", "This is a description"), ), }, { Config: testAccAWSApiGatewayUsagePlanBasicConfig(name), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "description", ""), + testAccCheckAWSAPIGatewayUsagePlanExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "description", ""), ), }, }, @@ -121,6 +111,7 @@ func TestAccAWSAPIGatewayUsagePlan_description(t *testing.T) { func TestAccAWSAPIGatewayUsagePlan_productCode(t *testing.T) { var conf apigateway.UsagePlan name := acctest.RandString(10) + resourceName := "aws_api_gateway_usage_plan.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -130,35 +121,40 @@ func TestAccAWSAPIGatewayUsagePlan_productCode(t *testing.T) { { Config: testAccAWSApiGatewayUsagePlanBasicConfig(name), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "product_code", ""), + testAccCheckAWSAPIGatewayUsagePlanExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, 
"product_code", ""), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSApiGatewayUsagePlanProductCodeConfig(name), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "product_code", "MYCODE"), + testAccCheckAWSAPIGatewayUsagePlanExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "product_code", "MYCODE"), ), }, { Config: testAccAWSApiGatewayUsagePlanProductCodeUpdatedConfig(name), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "product_code", "MYCODE2"), + resource.TestCheckResourceAttr(resourceName, "product_code", "MYCODE2"), ), }, { Config: testAccAWSApiGatewayUsagePlanProductCodeConfig(name), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "product_code", "MYCODE"), + testAccCheckAWSAPIGatewayUsagePlanExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "product_code", "MYCODE"), ), }, { Config: testAccAWSApiGatewayUsagePlanBasicConfig(name), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "product_code", ""), + testAccCheckAWSAPIGatewayUsagePlanExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "product_code", ""), ), }, }, @@ -168,6 +164,7 @@ func TestAccAWSAPIGatewayUsagePlan_productCode(t *testing.T) { func TestAccAWSAPIGatewayUsagePlan_throttling(t *testing.T) { var conf apigateway.UsagePlan name := acctest.RandString(10) + resourceName := "aws_api_gateway_usage_plan.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { 
testAccPreCheck(t) }, @@ -177,35 +174,40 @@ func TestAccAWSAPIGatewayUsagePlan_throttling(t *testing.T) { { Config: testAccAWSApiGatewayUsagePlanBasicConfig(name), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name), - resource.TestCheckNoResourceAttr("aws_api_gateway_usage_plan.main", "throttle_settings"), + testAccCheckAWSAPIGatewayUsagePlanExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "name", name), + resource.TestCheckNoResourceAttr(resourceName, "throttle_settings"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSApiGatewayUsagePlanThrottlingConfig(name), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "throttle_settings.4173790118.burst_limit", "2"), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "throttle_settings.4173790118.rate_limit", "5"), + testAccCheckAWSAPIGatewayUsagePlanExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "name", name), + resource.TestCheckResourceAttr(resourceName, "throttle_settings.4173790118.burst_limit", "2"), + resource.TestCheckResourceAttr(resourceName, "throttle_settings.4173790118.rate_limit", "5"), ), }, { Config: testAccAWSApiGatewayUsagePlanThrottlingModifiedConfig(name), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "throttle_settings.1779463053.burst_limit", "3"), - 
resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "throttle_settings.1779463053.rate_limit", "6"), + testAccCheckAWSAPIGatewayUsagePlanExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "name", name), + resource.TestCheckResourceAttr(resourceName, "throttle_settings.1779463053.burst_limit", "3"), + resource.TestCheckResourceAttr(resourceName, "throttle_settings.1779463053.rate_limit", "6"), ), }, { Config: testAccAWSApiGatewayUsagePlanBasicConfig(name), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name), - resource.TestCheckNoResourceAttr("aws_api_gateway_usage_plan.main", "throttle_settings"), + testAccCheckAWSAPIGatewayUsagePlanExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "name", name), + resource.TestCheckNoResourceAttr(resourceName, "throttle_settings"), ), }, }, @@ -216,6 +218,7 @@ func TestAccAWSAPIGatewayUsagePlan_throttling(t *testing.T) { func TestAccAWSAPIGatewayUsagePlan_throttlingInitialRateLimit(t *testing.T) { var conf apigateway.UsagePlan name := acctest.RandString(10) + resourceName := "aws_api_gateway_usage_plan.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -225,10 +228,15 @@ func TestAccAWSAPIGatewayUsagePlan_throttlingInitialRateLimit(t *testing.T) { { Config: testAccAWSApiGatewayUsagePlanThrottlingConfig(name), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "throttle_settings.4173790118.rate_limit", "5"), + testAccCheckAWSAPIGatewayUsagePlanExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "throttle_settings.4173790118.rate_limit", "5"), ), }, + { + ResourceName: resourceName, + ImportState: true, + 
ImportStateVerify: true, + }, }, }) } @@ -236,6 +244,7 @@ func TestAccAWSAPIGatewayUsagePlan_throttlingInitialRateLimit(t *testing.T) { func TestAccAWSAPIGatewayUsagePlan_quota(t *testing.T) { var conf apigateway.UsagePlan name := acctest.RandString(10) + resourceName := "aws_api_gateway_usage_plan.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -245,37 +254,42 @@ func TestAccAWSAPIGatewayUsagePlan_quota(t *testing.T) { { Config: testAccAWSApiGatewayUsagePlanBasicConfig(name), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name), - resource.TestCheckNoResourceAttr("aws_api_gateway_usage_plan.main", "quota_settings"), + testAccCheckAWSAPIGatewayUsagePlanExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "name", name), + resource.TestCheckNoResourceAttr(resourceName, "quota_settings"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccAWSApiGatewayUsagePlanQuotaConfig(name), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "quota_settings.1956747625.limit", "100"), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "quota_settings.1956747625.offset", "6"), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "quota_settings.1956747625.period", "WEEK"), + testAccCheckAWSAPIGatewayUsagePlanExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "name", name), + resource.TestCheckResourceAttr(resourceName, "quota_settings.1956747625.limit", "100"), + resource.TestCheckResourceAttr(resourceName, 
"quota_settings.1956747625.offset", "6"), + resource.TestCheckResourceAttr(resourceName, "quota_settings.1956747625.period", "WEEK"), ), }, { Config: testAccAWSApiGatewayUsagePlanQuotaModifiedConfig(name), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "quota_settings.3909168194.limit", "200"), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "quota_settings.3909168194.offset", "20"), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "quota_settings.3909168194.period", "MONTH"), + testAccCheckAWSAPIGatewayUsagePlanExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "name", name), + resource.TestCheckResourceAttr(resourceName, "quota_settings.3909168194.limit", "200"), + resource.TestCheckResourceAttr(resourceName, "quota_settings.3909168194.offset", "20"), + resource.TestCheckResourceAttr(resourceName, "quota_settings.3909168194.period", "MONTH"), ), }, { Config: testAccAWSApiGatewayUsagePlanBasicConfig(name), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name), - resource.TestCheckNoResourceAttr("aws_api_gateway_usage_plan.main", "quota_settings"), + testAccCheckAWSAPIGatewayUsagePlanExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "name", name), + resource.TestCheckNoResourceAttr(resourceName, "quota_settings"), ), }, }, @@ -285,6 +299,7 @@ func TestAccAWSAPIGatewayUsagePlan_quota(t *testing.T) { func TestAccAWSAPIGatewayUsagePlan_apiStages(t *testing.T) { var conf apigateway.UsagePlan name := acctest.RandString(10) + resourceName := "aws_api_gateway_usage_plan.test" resource.ParallelTest(t, 
resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -296,44 +311,49 @@ func TestAccAWSAPIGatewayUsagePlan_apiStages(t *testing.T) { { Config: testAccAWSApiGatewayUsagePlanApiStagesConfig(name), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "api_stages.0.stage", "test"), + testAccCheckAWSAPIGatewayUsagePlanExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "name", name), + resource.TestCheckResourceAttr(resourceName, "api_stages.0.stage", "test"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, // Handle api stages removal { Config: testAccAWSApiGatewayUsagePlanBasicConfig(name), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name), - resource.TestCheckNoResourceAttr("aws_api_gateway_usage_plan.main", "api_stages"), + testAccCheckAWSAPIGatewayUsagePlanExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "name", name), + resource.TestCheckNoResourceAttr(resourceName, "api_stages"), ), }, // Handle api stages additions { Config: testAccAWSApiGatewayUsagePlanApiStagesConfig(name), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "api_stages.0.stage", "test"), + testAccCheckAWSAPIGatewayUsagePlanExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "name", name), + resource.TestCheckResourceAttr(resourceName, "api_stages.0.stage", "test"), ), }, // 
Handle api stages updates { Config: testAccAWSApiGatewayUsagePlanApiStagesModifiedConfig(name), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "api_stages.0.stage", "foo"), + testAccCheckAWSAPIGatewayUsagePlanExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "name", name), + resource.TestCheckResourceAttr(resourceName, "api_stages.0.stage", "foo"), ), }, { Config: testAccAWSApiGatewayUsagePlanBasicConfig(name), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAPIGatewayUsagePlanExists("aws_api_gateway_usage_plan.main", &conf), - resource.TestCheckResourceAttr("aws_api_gateway_usage_plan.main", "name", name), - resource.TestCheckNoResourceAttr("aws_api_gateway_usage_plan.main", "api_stages"), + testAccCheckAWSAPIGatewayUsagePlanExists(resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "name", name), + resource.TestCheckNoResourceAttr(resourceName, "api_stages"), ), }, }, @@ -469,7 +489,7 @@ resource "aws_api_gateway_deployment" "foo" { func testAccAWSApiGatewayUsagePlanBasicConfig(rName string) string { return fmt.Sprintf(testAccAWSAPIGatewayUsagePlanConfig+` -resource "aws_api_gateway_usage_plan" "main" { +resource "aws_api_gateway_usage_plan" "test" { name = "%s" } `, rName) @@ -477,7 +497,7 @@ resource "aws_api_gateway_usage_plan" "main" { func testAccAWSApiGatewayUsagePlanDescriptionConfig(rName string) string { return fmt.Sprintf(testAccAWSAPIGatewayUsagePlanConfig+` -resource "aws_api_gateway_usage_plan" "main" { +resource "aws_api_gateway_usage_plan" "test" { name = "%s" description = "This is a description" } @@ -486,7 +506,7 @@ resource "aws_api_gateway_usage_plan" "main" { func testAccAWSApiGatewayUsagePlanDescriptionUpdatedConfig(rName string) string { return 
fmt.Sprintf(testAccAWSAPIGatewayUsagePlanConfig+` -resource "aws_api_gateway_usage_plan" "main" { +resource "aws_api_gateway_usage_plan" "test" { name = "%s" description = "This is a new description" } @@ -495,7 +515,7 @@ resource "aws_api_gateway_usage_plan" "main" { func testAccAWSApiGatewayUsagePlanProductCodeConfig(rName string) string { return fmt.Sprintf(testAccAWSAPIGatewayUsagePlanConfig+` -resource "aws_api_gateway_usage_plan" "main" { +resource "aws_api_gateway_usage_plan" "test" { name = "%s" product_code = "MYCODE" } @@ -504,7 +524,7 @@ resource "aws_api_gateway_usage_plan" "main" { func testAccAWSApiGatewayUsagePlanProductCodeUpdatedConfig(rName string) string { return fmt.Sprintf(testAccAWSAPIGatewayUsagePlanConfig+` -resource "aws_api_gateway_usage_plan" "main" { +resource "aws_api_gateway_usage_plan" "test" { name = "%s" product_code = "MYCODE2" } @@ -513,7 +533,7 @@ resource "aws_api_gateway_usage_plan" "main" { func testAccAWSApiGatewayUsagePlanBasicUpdatedConfig(rName string) string { return fmt.Sprintf(testAccAWSAPIGatewayUsagePlanConfig+` -resource "aws_api_gateway_usage_plan" "main" { +resource "aws_api_gateway_usage_plan" "test" { name = "%s" } `, rName) @@ -521,7 +541,7 @@ resource "aws_api_gateway_usage_plan" "main" { func testAccAWSApiGatewayUsagePlanThrottlingConfig(rName string) string { return fmt.Sprintf(testAccAWSAPIGatewayUsagePlanConfig+` -resource "aws_api_gateway_usage_plan" "main" { +resource "aws_api_gateway_usage_plan" "test" { name = "%s" throttle_settings { @@ -534,7 +554,7 @@ resource "aws_api_gateway_usage_plan" "main" { func testAccAWSApiGatewayUsagePlanThrottlingModifiedConfig(rName string) string { return fmt.Sprintf(testAccAWSAPIGatewayUsagePlanConfig+` -resource "aws_api_gateway_usage_plan" "main" { +resource "aws_api_gateway_usage_plan" "test" { name = "%s" throttle_settings { @@ -547,7 +567,7 @@ resource "aws_api_gateway_usage_plan" "main" { func testAccAWSApiGatewayUsagePlanQuotaConfig(rName string) string { return 
fmt.Sprintf(testAccAWSAPIGatewayUsagePlanConfig+` -resource "aws_api_gateway_usage_plan" "main" { +resource "aws_api_gateway_usage_plan" "test" { name = "%s" quota_settings { @@ -561,7 +581,7 @@ resource "aws_api_gateway_usage_plan" "main" { func testAccAWSApiGatewayUsagePlanQuotaModifiedConfig(rName string) string { return fmt.Sprintf(testAccAWSAPIGatewayUsagePlanConfig+` -resource "aws_api_gateway_usage_plan" "main" { +resource "aws_api_gateway_usage_plan" "test" { name = "%s" quota_settings { @@ -575,7 +595,7 @@ resource "aws_api_gateway_usage_plan" "main" { func testAccAWSApiGatewayUsagePlanApiStagesConfig(rName string) string { return fmt.Sprintf(testAccAWSAPIGatewayUsagePlanConfig+` -resource "aws_api_gateway_usage_plan" "main" { +resource "aws_api_gateway_usage_plan" "test" { name = "%s" api_stages { @@ -588,7 +608,7 @@ resource "aws_api_gateway_usage_plan" "main" { func testAccAWSApiGatewayUsagePlanApiStagesModifiedConfig(rName string) string { return fmt.Sprintf(testAccAWSAPIGatewayUsagePlanConfig+` -resource "aws_api_gateway_usage_plan" "main" { +resource "aws_api_gateway_usage_plan" "test" { name = "%s" api_stages { diff --git a/aws/resource_aws_api_gateway_vpc_link_test.go b/aws/resource_aws_api_gateway_vpc_link_test.go index f6c349848d4..b70efd7dc21 100644 --- a/aws/resource_aws_api_gateway_vpc_link_test.go +++ b/aws/resource_aws_api_gateway_vpc_link_test.go @@ -59,35 +59,6 @@ func testSweepAPIGatewayVpcLinks(region string) error { } func TestAccAWSAPIGatewayVpcLink_basic(t *testing.T) { - rName := acctest.RandString(5) - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAwsAPIGatewayVpcLinkDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAPIGatewayVpcLinkConfig(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsAPIGatewayVpcLinkExists("aws_api_gateway_vpc_link.test"), - resource.TestCheckResourceAttr("aws_api_gateway_vpc_link.test", 
"name", fmt.Sprintf("tf-apigateway-%s", rName)), - resource.TestCheckResourceAttr("aws_api_gateway_vpc_link.test", "description", "test"), - resource.TestCheckResourceAttr("aws_api_gateway_vpc_link.test", "target_arns.#", "1"), - ), - }, - { - Config: testAccAPIGatewayVpcLinkConfig_Update(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckAwsAPIGatewayVpcLinkExists("aws_api_gateway_vpc_link.test"), - resource.TestCheckResourceAttr("aws_api_gateway_vpc_link.test", "name", fmt.Sprintf("tf-apigateway-update-%s", rName)), - resource.TestCheckResourceAttr("aws_api_gateway_vpc_link.test", "description", "test update"), - resource.TestCheckResourceAttr("aws_api_gateway_vpc_link.test", "target_arns.#", "1"), - ), - }, - }, - }) -} - -func TestAccAWSAPIGatewayVpcLink_importBasic(t *testing.T) { rName := acctest.RandString(5) resourceName := "aws_api_gateway_vpc_link.test" @@ -98,12 +69,27 @@ func TestAccAWSAPIGatewayVpcLink_importBasic(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccAPIGatewayVpcLinkConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsAPIGatewayVpcLinkExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "name", fmt.Sprintf("tf-apigateway-%s", rName)), + resource.TestCheckResourceAttr(resourceName, "description", "test"), + resource.TestCheckResourceAttr(resourceName, "target_arns.#", "1"), + ), }, { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, }, + { + Config: testAccAPIGatewayVpcLinkConfig_Update(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsAPIGatewayVpcLinkExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "name", fmt.Sprintf("tf-apigateway-update-%s", rName)), + resource.TestCheckResourceAttr(resourceName, "description", "test update"), + resource.TestCheckResourceAttr(resourceName, "target_arns.#", "1"), + ), + }, }, }) } diff --git a/aws/resource_aws_athena_named_query_test.go b/aws/resource_aws_athena_named_query_test.go index 
49708022ca1..94d0ee2fd36 100644 --- a/aws/resource_aws_athena_named_query_test.go +++ b/aws/resource_aws_athena_named_query_test.go @@ -12,6 +12,8 @@ import ( ) func TestAccAWSAthenaNamedQuery_basic(t *testing.T) { + resourceName := "aws_athena_named_query.test" + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -20,14 +22,21 @@ func TestAccAWSAthenaNamedQuery_basic(t *testing.T) { { Config: testAccAthenaNamedQueryConfig(acctest.RandInt(), acctest.RandString(5)), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAthenaNamedQueryExists("aws_athena_named_query.foo"), + testAccCheckAWSAthenaNamedQueryExists(resourceName), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } func TestAccAWSAthenaNamedQuery_withWorkGroup(t *testing.T) { + resourceName := "aws_athena_named_query.test" + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -36,25 +45,9 @@ func TestAccAWSAthenaNamedQuery_withWorkGroup(t *testing.T) { { Config: testAccAthenaNamedWorkGroupQueryConfig(acctest.RandInt(), acctest.RandString(5)), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSAthenaNamedQueryExists("aws_athena_named_query.bar"), + testAccCheckAWSAthenaNamedQueryExists(resourceName), ), }, - }, - }) -} - -func TestAccAWSAthenaNamedQuery_import(t *testing.T) { - resourceName := "aws_athena_named_query.foo" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAthenaNamedQueryDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAthenaNamedQueryConfig(acctest.RandInt(), acctest.RandString(5)), - }, - { ResourceName: resourceName, ImportState: true, @@ -109,20 +102,20 @@ func testAccCheckAWSAthenaNamedQueryExists(name string) resource.TestCheckFunc { func testAccAthenaNamedQueryConfig(rInt int, rName 
string) string { return fmt.Sprintf(` -resource "aws_s3_bucket" "hoge" { +resource "aws_s3_bucket" "test" { bucket = "tf-athena-db-%s-%d" force_destroy = true } -resource "aws_athena_database" "hoge" { +resource "aws_athena_database" "test" { name = "%s" - bucket = "${aws_s3_bucket.hoge.bucket}" + bucket = "${aws_s3_bucket.test.bucket}" } -resource "aws_athena_named_query" "foo" { +resource "aws_athena_named_query" "test" { name = "tf-athena-named-query-%s" - database = "${aws_athena_database.hoge.name}" - query = "SELECT * FROM ${aws_athena_database.hoge.name} limit 10;" + database = "${aws_athena_database.test.name}" + query = "SELECT * FROM ${aws_athena_database.test.name} limit 10;" description = "tf test" } `, rName, rInt, rName, rName) @@ -130,7 +123,7 @@ resource "aws_athena_named_query" "foo" { func testAccAthenaNamedWorkGroupQueryConfig(rInt int, rName string) string { return fmt.Sprintf(` -resource "aws_s3_bucket" "hoge" { +resource "aws_s3_bucket" "test" { bucket = "tf-athena-db-%s-%d" force_destroy = true } @@ -139,16 +132,16 @@ resource "aws_athena_workgroup" "test" { name = "tf-athena-workgroup-%s-%d" } -resource "aws_athena_database" "hoge" { +resource "aws_athena_database" "test" { name = "%s" - bucket = "${aws_s3_bucket.hoge.bucket}" + bucket = "${aws_s3_bucket.test.bucket}" } -resource "aws_athena_named_query" "bar" { +resource "aws_athena_named_query" "test" { name = "tf-athena-named-query-%s" workgroup = "${aws_athena_workgroup.test.id}" - database = "${aws_athena_database.hoge.name}" - query = "SELECT * FROM ${aws_athena_database.hoge.name} limit 10;" + database = "${aws_athena_database.test.name}" + query = "SELECT * FROM ${aws_athena_database.test.name} limit 10;" description = "tf test" } `, rName, rInt, rName, rInt, rName, rName) diff --git a/aws/resource_aws_backup_plan.go b/aws/resource_aws_backup_plan.go index bf6d542457e..1a895826e15 100644 --- a/aws/resource_aws_backup_plan.go +++ b/aws/resource_aws_backup_plan.go @@ -9,6 +9,7 @@ 
import ( "github.com/aws/aws-sdk-go/service/backup" "github.com/hashicorp/terraform-plugin-sdk/helper/hashcode" "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + "github.com/terraform-providers/terraform-provider-aws/aws/internal/keyvaluetags" ) func resourceAwsBackupPlan() *schema.Resource { @@ -68,14 +69,10 @@ func resourceAwsBackupPlan() *schema.Resource { }, }, }, - "recovery_point_tags": { - Type: schema.TypeMap, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, + "recovery_point_tags": tagsSchema(), }, }, - Set: resourceAwsPlanRuleHash, + Set: backupBackupPlanHash, }, "arn": { Type: schema.TypeString, @@ -93,28 +90,21 @@ func resourceAwsBackupPlan() *schema.Resource { func resourceAwsBackupPlanCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).backupconn - plan := &backup.PlanInput{ - BackupPlanName: aws.String(d.Get("name").(string)), - } - - rules := expandBackupPlanRules(d.Get("rule").(*schema.Set).List()) - - plan.Rules = rules - input := &backup.CreateBackupPlanInput{ - BackupPlan: plan, - } - - if v, ok := d.GetOk("tags"); ok { - input.BackupPlanTags = tagsFromMapGeneric(v.(map[string]interface{})) + BackupPlan: &backup.PlanInput{ + BackupPlanName: aws.String(d.Get("name").(string)), + Rules: expandBackupPlanRules(d.Get("rule").(*schema.Set)), + }, + BackupPlanTags: keyvaluetags.New(d.Get("tags").(map[string]interface{})).IgnoreAws().BackupTags(), } + log.Printf("[DEBUG] Creating Backup Plan: %#v", input) resp, err := conn.CreateBackupPlan(input) if err != nil { return fmt.Errorf("error creating Backup Plan: %s", err) } - d.SetId(*resp.BackupPlanId) + d.SetId(aws.StringValue(resp.BackupPlanId)) return resourceAwsBackupPlanRead(d, meta) } @@ -122,123 +112,60 @@ func resourceAwsBackupPlanCreate(d *schema.ResourceData, meta interface{}) error func resourceAwsBackupPlanRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).backupconn - input := 
&backup.GetBackupPlanInput{ + resp, err := conn.GetBackupPlan(&backup.GetBackupPlanInput{ BackupPlanId: aws.String(d.Id()), - } - - resp, err := conn.GetBackupPlan(input) + }) if isAWSErr(err, backup.ErrCodeResourceNotFoundException, "") { log.Printf("[WARN] Backup Plan (%s) not found, removing from state", d.Id()) d.SetId("") return nil } - if err != nil { - return fmt.Errorf("error reading Backup Plan: %s", err) + return fmt.Errorf("error reading Backup Plan (%s): %s", d.Id(), err) } - rule := &schema.Set{F: resourceAwsPlanRuleHash} - - for _, r := range resp.BackupPlan.Rules { - m := make(map[string]interface{}) - - m["completion_window"] = aws.Int64Value(r.CompletionWindowMinutes) - m["recovery_point_tags"] = aws.StringValueMap(r.RecoveryPointTags) - m["rule_name"] = aws.StringValue(r.RuleName) - m["schedule"] = aws.StringValue(r.ScheduleExpression) - m["start_window"] = aws.Int64Value(r.StartWindowMinutes) - m["target_vault_name"] = aws.StringValue(r.TargetBackupVaultName) - - if r.Lifecycle != nil { - l := make(map[string]interface{}) - l["delete_after"] = aws.Int64Value(r.Lifecycle.DeleteAfterDays) - l["cold_storage_after"] = aws.Int64Value(r.Lifecycle.MoveToColdStorageAfterDays) - m["lifecycle"] = []interface{}{l} - } + d.Set("arn", resp.BackupPlanArn) + d.Set("name", resp.BackupPlan.BackupPlanName) + d.Set("version", resp.VersionId) - rule.Add(m) - } - if err := d.Set("rule", rule); err != nil { + if err := d.Set("rule", flattenBackupPlanRules(resp.BackupPlan.Rules)); err != nil { return fmt.Errorf("error setting rule: %s", err) } - tagsOutput, err := conn.ListTags(&backup.ListTagsInput{ - ResourceArn: resp.BackupPlanArn, - }) + tags, err := keyvaluetags.BackupListTags(conn, d.Get("arn").(string)) if err != nil { - return fmt.Errorf("error listing tags AWS Backup plan %s: %s", d.Id(), err) + return fmt.Errorf("error listing tags for Backup Plan (%s): %s", d.Id(), err) } - - if err := d.Set("tags", tagsToMapGeneric(tagsOutput.Tags)); err != nil { - return 
fmt.Errorf("error setting tags on AWS Backup plan %s: %s", d.Id(), err) + if err := d.Set("tags", tags.IgnoreAws().Map()); err != nil { + return fmt.Errorf("error setting tags: %s", err) } - d.Set("arn", resp.BackupPlanArn) - d.Set("version", resp.VersionId) - return nil } func resourceAwsBackupPlanUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).backupconn - plan := &backup.PlanInput{ - BackupPlanName: aws.String(d.Get("name").(string)), - } - - rules := expandBackupPlanRules(d.Get("rule").(*schema.Set).List()) - - plan.Rules = rules - - input := &backup.UpdateBackupPlanInput{ - BackupPlanId: aws.String(d.Id()), - BackupPlan: plan, - } + if d.HasChange("rule") { + input := &backup.UpdateBackupPlanInput{ + BackupPlanId: aws.String(d.Id()), + BackupPlan: &backup.PlanInput{ + BackupPlanName: aws.String(d.Get("name").(string)), + Rules: expandBackupPlanRules(d.Get("rule").(*schema.Set)), + }, + } - _, err := conn.UpdateBackupPlan(input) - if err != nil { - return fmt.Errorf("error updating Backup Plan: %s", err) + log.Printf("[DEBUG] Updating Backup Plan: %#v", input) + _, err := conn.UpdateBackupPlan(input) + if err != nil { + return fmt.Errorf("error updating Backup Plan (%s): %s", d.Id(), err) + } } if d.HasChange("tags") { - resourceArn := d.Get("arn").(string) - oraw, nraw := d.GetChange("tags") - create, remove := diffTagsGeneric(oraw.(map[string]interface{}), nraw.(map[string]interface{})) - - if len(remove) > 0 { - log.Printf("[DEBUG] Removing tags: %#v", remove) - keys := make([]*string, 0, len(remove)) - for k := range remove { - keys = append(keys, aws.String(k)) - } - - _, err := conn.UntagResource(&backup.UntagResourceInput{ - ResourceArn: aws.String(resourceArn), - TagKeyList: keys, - }) - if isAWSErr(err, backup.ErrCodeResourceNotFoundException, "") { - log.Printf("[WARN] Backup Plan %s not found, removing from state", d.Id()) - d.SetId("") - return nil - } - if err != nil { - return fmt.Errorf("Error removing tags for 
(%s): %s", d.Id(), err) - } - } - if len(create) > 0 { - log.Printf("[DEBUG] Creating tags: %#v", create) - _, err := conn.TagResource(&backup.TagResourceInput{ - ResourceArn: aws.String(resourceArn), - Tags: create, - }) - if isAWSErr(err, backup.ErrCodeResourceNotFoundException, "") { - log.Printf("[WARN] Backup Plan %s not found, removing from state", d.Id()) - d.SetId("") - return nil - } - if err != nil { - return fmt.Errorf("Error setting tags for (%s): %s", d.Id(), err) - } + o, n := d.GetChange("tags") + if err := keyvaluetags.BackupUpdateTags(conn, d.Get("arn").(string), o, n); err != nil { + return fmt.Errorf("error updating tags for Backup Plan (%s): %s", d.Id(), err) } } @@ -248,66 +175,64 @@ func resourceAwsBackupPlanUpdate(d *schema.ResourceData, meta interface{}) error func resourceAwsBackupPlanDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).backupconn - input := &backup.DeleteBackupPlanInput{ + log.Printf("[DEBUG] Deleting Backup Plan: %s", d.Id()) + _, err := conn.DeleteBackupPlan(&backup.DeleteBackupPlanInput{ BackupPlanId: aws.String(d.Id()), - } - - _, err := conn.DeleteBackupPlan(input) + }) if isAWSErr(err, backup.ErrCodeResourceNotFoundException, "") { return nil } if err != nil { - return fmt.Errorf("error deleting Backup Plan: %s", err) + return fmt.Errorf("error deleting Backup Plan (%s): %s", d.Id(), err) } return nil } -func expandBackupPlanRules(l []interface{}) []*backup.RuleInput { +func expandBackupPlanRules(vRules *schema.Set) []*backup.RuleInput { rules := []*backup.RuleInput{} - for _, i := range l { - item := i.(map[string]interface{}) + for _, vRule := range vRules.List() { rule := &backup.RuleInput{} - if item["rule_name"] != "" { - rule.RuleName = aws.String(item["rule_name"].(string)) + mRule := vRule.(map[string]interface{}) + + if vRuleName, ok := mRule["rule_name"].(string); ok && vRuleName != "" { + rule.RuleName = aws.String(vRuleName) + } else { + continue } - if 
item["target_vault_name"] != "" { - rule.TargetBackupVaultName = aws.String(item["target_vault_name"].(string)) + if vTargetVaultName, ok := mRule["target_vault_name"].(string); ok && vTargetVaultName != "" { + rule.TargetBackupVaultName = aws.String(vTargetVaultName) } - if item["schedule"] != "" { - rule.ScheduleExpression = aws.String(item["schedule"].(string)) + if vSchedule, ok := mRule["schedule"].(string); ok && vSchedule != "" { + rule.ScheduleExpression = aws.String(vSchedule) } - if item["start_window"] != nil { - rule.StartWindowMinutes = aws.Int64(int64(item["start_window"].(int))) + if vStartWindow, ok := mRule["start_window"].(int); ok { + rule.StartWindowMinutes = aws.Int64(int64(vStartWindow)) } - if item["completion_window"] != nil { - rule.CompletionWindowMinutes = aws.Int64(int64(item["completion_window"].(int))) + if vCompletionWindow, ok := mRule["completion_window"].(int); ok { + rule.CompletionWindowMinutes = aws.Int64(int64(vCompletionWindow)) } - if item["recovery_point_tags"] != nil { - rule.RecoveryPointTags = tagsFromMapGeneric(item["recovery_point_tags"].(map[string]interface{})) + if vRecoveryPointTags, ok := mRule["recovery_point_tags"].(map[string]interface{}); ok && len(vRecoveryPointTags) > 0 { + rule.RecoveryPointTags = tagsFromMapGeneric(vRecoveryPointTags) } - var lifecycle map[string]interface{} - if i.(map[string]interface{})["lifecycle"] != nil { - lifecycleRaw := i.(map[string]interface{})["lifecycle"].([]interface{}) - if len(lifecycleRaw) == 1 { - lifecycle = lifecycleRaw[0].(map[string]interface{}) - lcValues := &backup.Lifecycle{} - - if v, ok := lifecycle["delete_after"]; ok && v.(int) > 0 { - lcValues.DeleteAfterDays = aws.Int64(int64(v.(int))) - } - - if v, ok := lifecycle["cold_storage_after"]; ok && v.(int) > 0 { - lcValues.MoveToColdStorageAfterDays = aws.Int64(int64(v.(int))) - } - rule.Lifecycle = lcValues + if vLifecycle, ok := mRule["lifecycle"].([]interface{}); ok && len(vLifecycle) > 0 && vLifecycle[0] != nil 
{ + lifecycle := &backup.Lifecycle{} + + mLifecycle := vLifecycle[0].(map[string]interface{}) + + if vDeleteAfter, ok := mLifecycle["delete_after"].(int); ok && vDeleteAfter > 0 { + lifecycle.DeleteAfterDays = aws.Int64(int64(vDeleteAfter)) + } + if vColdStorageAfter, ok := mLifecycle["cold_storage_after"].(int); ok && vColdStorageAfter > 0 { + lifecycle.MoveToColdStorageAfterDays = aws.Int64(int64(vColdStorageAfter)) } + rule.Lifecycle = lifecycle } rules = append(rules, rule) @@ -316,46 +241,68 @@ func expandBackupPlanRules(l []interface{}) []*backup.RuleInput { return rules } -func resourceAwsPlanRuleHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - - if v.(map[string]interface{})["lifecycle"] != nil { - lcRaw := v.(map[string]interface{})["lifecycle"].([]interface{}) - if len(lcRaw) == 1 { - l := lcRaw[0].(map[string]interface{}) - if w, ok := l["delete_after"]; ok { - buf.WriteString(fmt.Sprintf("%v-", w)) - } +func flattenBackupPlanRules(rules []*backup.Rule) *schema.Set { + vRules := []interface{}{} + + for _, rule := range rules { + mRule := map[string]interface{}{ + "rule_name": aws.StringValue(rule.RuleName), + "target_vault_name": aws.StringValue(rule.TargetBackupVaultName), + "schedule": aws.StringValue(rule.ScheduleExpression), + "start_window": int(aws.Int64Value(rule.StartWindowMinutes)), + "completion_window": int(aws.Int64Value(rule.CompletionWindowMinutes)), + "recovery_point_tags": tagsToMapGeneric(rule.RecoveryPointTags), + } - if w, ok := l["cold_storage_after"]; ok { - buf.WriteString(fmt.Sprintf("%v-", w)) + if lifecycle := rule.Lifecycle; lifecycle != nil { + mRule["lifecycle"] = []interface{}{ + map[string]interface{}{ + "delete_after": int(aws.Int64Value(lifecycle.DeleteAfterDays)), + "cold_storage_after": int(aws.Int64Value(lifecycle.MoveToColdStorageAfterDays)), + }, } } - } - if v, ok := m["completion_window"]; ok { - buf.WriteString(fmt.Sprintf("%d-", v.(interface{}))) + vRules = append(vRules, mRule) 
} - if v, ok := m["recovery_point_tags"]; ok { - buf.WriteString(fmt.Sprintf("%v-", v)) - } + return schema.NewSet(backupBackupPlanHash, vRules) +} - if v, ok := m["rule_name"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } +func backupBackupPlanHash(vRule interface{}) int { + var buf bytes.Buffer - if v, ok := m["schedule"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) + mRule := vRule.(map[string]interface{}) + + if v, ok := mRule["rule_name"].(string); ok { + buf.WriteString(fmt.Sprintf("%s-", v)) + } + if v, ok := mRule["target_vault_name"].(string); ok { + buf.WriteString(fmt.Sprintf("%s-", v)) + } + if v, ok := mRule["schedule"].(string); ok { + buf.WriteString(fmt.Sprintf("%s-", v)) + } + if v, ok := mRule["start_window"].(int); ok { + buf.WriteString(fmt.Sprintf("%d-", v)) + } + if v, ok := mRule["completion_window"].(int); ok { + buf.WriteString(fmt.Sprintf("%d-", v)) } - if v, ok := m["start_window"]; ok { - buf.WriteString(fmt.Sprintf("%d-", v.(interface{}))) + if vRecoveryPointTags, ok := mRule["recovery_point_tags"].(map[string]interface{}); ok && len(vRecoveryPointTags) > 0 { + buf.WriteString(fmt.Sprintf("%d-", tagsMapToHash(vRecoveryPointTags))) } - if v, ok := m["target_vault_name"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) + if vLifecycle, ok := mRule["lifecycle"].([]interface{}); ok && len(vLifecycle) > 0 && vLifecycle[0] != nil { + mLifecycle := vLifecycle[0].(map[string]interface{}) + + if v, ok := mLifecycle["delete_after"].(int); ok { + buf.WriteString(fmt.Sprintf("%d-", v)) + } + if v, ok := mLifecycle["cold_storage_after"].(int); ok { + buf.WriteString(fmt.Sprintf("%d-", v)) + } } return hashcode.String(buf.String()) diff --git a/aws/resource_aws_backup_plan_test.go b/aws/resource_aws_backup_plan_test.go index 9fd797bd852..7ce2dab20c5 100644 --- a/aws/resource_aws_backup_plan_test.go +++ b/aws/resource_aws_backup_plan_test.go @@ -14,7 +14,9 @@ import ( func TestAccAwsBackupPlan_basic(t *testing.T) { var 
plan backup.GetBackupPlanOutput - rInt := acctest.RandInt() + ruleNameMap := map[string]string{} + resourceName := "aws_backup_plan.test" + rName := fmt.Sprintf("tf-testacc-backup-%s", acctest.RandStringFromCharSet(14, acctest.CharSetAlphaNum)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSBackup(t) }, @@ -22,13 +24,19 @@ func TestAccAwsBackupPlan_basic(t *testing.T) { CheckDestroy: testAccCheckAwsBackupPlanDestroy, Steps: []resource.TestStep{ { - Config: testAccBackupPlanConfig(rInt), + Config: testAccAwsBackupPlanConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAwsBackupPlanExists("aws_backup_plan.test", &plan), - testAccMatchResourceAttrRegionalARN("aws_backup_plan.test", "arn", "backup", regexp.MustCompile(`backup-plan:.+`)), - resource.TestCheckResourceAttrSet("aws_backup_plan.test", "version"), - resource.TestCheckResourceAttr("aws_backup_plan.test", "rule.#", "1"), - resource.TestCheckNoResourceAttr("aws_backup_plan.test", "rule.712706565.lifecycle.#"), + testAccCheckAwsBackupPlanExists(resourceName, &plan, &ruleNameMap), + testAccMatchResourceAttrRegionalARN(resourceName, "arn", "backup", regexp.MustCompile(`backup-plan:.+`)), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "rule_name", rName), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "target_vault_name", rName), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "schedule", "cron(0 12 * * ? 
*)"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "lifecycle.#", "0"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "recovery_point_tags.%", "0"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttrSet(resourceName, "version"), ), }, }, @@ -37,7 +45,9 @@ func TestAccAwsBackupPlan_basic(t *testing.T) { func TestAccAwsBackupPlan_withTags(t *testing.T) { var plan backup.GetBackupPlanOutput - rInt := acctest.RandInt() + ruleNameMap := map[string]string{} + resourceName := "aws_backup_plan.test" + rName := fmt.Sprintf("tf-testacc-backup-%s", acctest.RandStringFromCharSet(14, acctest.CharSetAlphaNum)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSBackup(t) }, @@ -45,28 +55,36 @@ func TestAccAwsBackupPlan_withTags(t *testing.T) { CheckDestroy: testAccCheckAwsBackupPlanDestroy, Steps: []resource.TestStep{ { - Config: testAccBackupPlanWithTag(rInt), + Config: testAccAwsBackupPlanConfig_tags(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAwsBackupPlanExists("aws_backup_plan.test", &plan), - resource.TestCheckResourceAttr("aws_backup_plan.test", "tags.%", "1"), - resource.TestCheckResourceAttr("aws_backup_plan.test", "tags.env", "test"), + testAccCheckAwsBackupPlanExists(resourceName, &plan, &ruleNameMap), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "3"), + resource.TestCheckResourceAttr(resourceName, "tags.Name", rName), + resource.TestCheckResourceAttr(resourceName, "tags.Key1", "Value1"), + resource.TestCheckResourceAttr(resourceName, "tags.Key2", "Value2a"), ), }, { - Config: testAccBackupPlanWithTags(rInt), + Config: testAccAwsBackupPlanConfig_tagsUpdated(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAwsBackupPlanExists("aws_backup_plan.test", &plan), - 
resource.TestCheckResourceAttr("aws_backup_plan.test", "tags.%", "2"), - resource.TestCheckResourceAttr("aws_backup_plan.test", "tags.env", "test"), - resource.TestCheckResourceAttr("aws_backup_plan.test", "tags.app", "widget"), + testAccCheckAwsBackupPlanExists(resourceName, &plan, &ruleNameMap), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "3"), + resource.TestCheckResourceAttr(resourceName, "tags.Name", rName), + resource.TestCheckResourceAttr(resourceName, "tags.Key2", "Value2b"), + resource.TestCheckResourceAttr(resourceName, "tags.Key3", "Value3"), ), }, { - Config: testAccBackupPlanWithTag(rInt), + Config: testAccAwsBackupPlanConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAwsBackupPlanExists("aws_backup_plan.test", &plan), - resource.TestCheckResourceAttr("aws_backup_plan.test", "tags.%", "1"), - resource.TestCheckResourceAttr("aws_backup_plan.test", "tags.env", "test"), + testAccCheckAwsBackupPlanExists(resourceName, &plan, &ruleNameMap), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), }, }, @@ -75,7 +93,12 @@ func TestAccAwsBackupPlan_withTags(t *testing.T) { func TestAccAwsBackupPlan_withRules(t *testing.T) { var plan backup.GetBackupPlanOutput - rInt := acctest.RandInt() + ruleNameMap := map[string]string{} + resourceName := "aws_backup_plan.test" + rName := fmt.Sprintf("tf-testacc-backup-%s", acctest.RandStringFromCharSet(14, acctest.CharSetAlphaNum)) + rule1Name := fmt.Sprintf("%s_1", rName) + rule2Name := fmt.Sprintf("%s_2", rName) + rule3Name := fmt.Sprintf("%s_3", rName) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSBackup(t) }, @@ -83,46 +106,71 @@ func TestAccAwsBackupPlan_withRules(t 
*testing.T) { CheckDestroy: testAccCheckAwsBackupPlanDestroy, Steps: []resource.TestStep{ { - Config: testAccBackupPlanWithRules(rInt), + Config: testAccAwsBackupPlanConfig_twoRules(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAwsBackupPlanExists("aws_backup_plan.test", &plan), - resource.TestCheckResourceAttr("aws_backup_plan.test", "rule.#", "2"), + testAccCheckAwsBackupPlanExists(resourceName, &plan, &ruleNameMap), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "rule.#", "2"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule1Name, "rule_name", rule1Name), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule1Name, "target_vault_name", rName), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule1Name, "schedule", "cron(0 12 * * ? *)"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule1Name, "lifecycle.#", "0"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule1Name, "recovery_point_tags.%", "0"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule2Name, "rule_name", rule2Name), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule2Name, "target_vault_name", rName), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule2Name, "schedule", "cron(0 6 * * ? 
*)"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule2Name, "lifecycle.#", "0"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule2Name, "recovery_point_tags.%", "0"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), }, - }, - }) -} - -func TestAccAwsBackupPlan_withRuleRemove(t *testing.T) { - var plan backup.GetBackupPlanOutput - rInt := acctest.RandInt() - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSBackup(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAwsBackupPlanDestroy, - Steps: []resource.TestStep{ { - Config: testAccBackupPlanWithRules(rInt), + Config: testAccAwsBackupPlanConfig_threeRules(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAwsBackupPlanExists("aws_backup_plan.test", &plan), - resource.TestCheckResourceAttr("aws_backup_plan.test", "rule.#", "2"), + testAccCheckAwsBackupPlanExists(resourceName, &plan, &ruleNameMap), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "rule.#", "3"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule1Name, "rule_name", rule1Name), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule1Name, "target_vault_name", rName), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule1Name, "schedule", "cron(0 6 * * ? *)"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule1Name, "lifecycle.#", "0"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule1Name, "recovery_point_tags.%", "0"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule2Name, "rule_name", rule2Name), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule2Name, "target_vault_name", rName), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule2Name, "schedule", "cron(0 12 * * ? 
*)"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule2Name, "lifecycle.#", "0"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule2Name, "recovery_point_tags.%", "0"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule3Name, "rule_name", rule3Name), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule3Name, "target_vault_name", rName), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule3Name, "schedule", "cron(0 18 * * ? *)"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule3Name, "lifecycle.#", "0"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rule3Name, "recovery_point_tags.%", "0"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), }, { - Config: testAccBackupPlanConfig(rInt), + Config: testAccAwsBackupPlanConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAwsBackupPlanExists("aws_backup_plan.test", &plan), - resource.TestCheckResourceAttr("aws_backup_plan.test", "rule.#", "1"), + testAccCheckAwsBackupPlanExists(resourceName, &plan, &ruleNameMap), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "rule_name", rName), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "target_vault_name", rName), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "schedule", "cron(0 12 * * ? 
*)"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "lifecycle.#", "0"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "recovery_point_tags.%", "0"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), }, }, }) } -func TestAccAwsBackupPlan_withRuleAdd(t *testing.T) { +func TestAccAwsBackupPlan_withLifecycle(t *testing.T) { var plan backup.GetBackupPlanOutput - rInt := acctest.RandInt() + ruleNameMap := map[string]string{} + resourceName := "aws_backup_plan.test" + rName := fmt.Sprintf("tf-testacc-backup-%s", acctest.RandStringFromCharSet(14, acctest.CharSetAlphaNum)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSBackup(t) }, @@ -130,68 +178,64 @@ func TestAccAwsBackupPlan_withRuleAdd(t *testing.T) { CheckDestroy: testAccCheckAwsBackupPlanDestroy, Steps: []resource.TestStep{ { - Config: testAccBackupPlanConfig(rInt), + Config: testAccAwsBackupPlanConfig_lifecycleColdStorageAfterOnly(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAwsBackupPlanExists("aws_backup_plan.test", &plan), - resource.TestCheckResourceAttr("aws_backup_plan.test", "rule.#", "1"), + testAccCheckAwsBackupPlanExists(resourceName, &plan, &ruleNameMap), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "rule_name", rName), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "lifecycle.#", "1"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "lifecycle.0.cold_storage_after", "7"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "lifecycle.0.delete_after", "0"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "recovery_point_tags.%", "0"), ), }, { - Config: testAccBackupPlanWithRules(rInt), + Config: 
testAccAwsBackupPlanConfig_lifecycleDeleteAfterOnly(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAwsBackupPlanExists("aws_backup_plan.test", &plan), - resource.TestCheckResourceAttr("aws_backup_plan.test", "rule.#", "2"), + testAccCheckAwsBackupPlanExists(resourceName, &plan, &ruleNameMap), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "rule_name", rName), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "lifecycle.#", "1"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "lifecycle.0.cold_storage_after", "0"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "lifecycle.0.delete_after", "120"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "recovery_point_tags.%", "0"), ), }, - }, - }) -} - -func TestAccAwsBackupPlan_withLifecycle(t *testing.T) { - var plan backup.GetBackupPlanOutput - rStr := "lifecycle_policy" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSBackup(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAwsBackupPlanDestroy, - Steps: []resource.TestStep{ { - Config: testAccBackupPlanWithLifecycle(rStr), + Config: testAccAwsBackupPlanConfig_lifecycleColdStorageAfterAndDeleteAfter(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAwsBackupPlanExists("aws_backup_plan.test", &plan), - resource.TestCheckResourceAttr("aws_backup_plan.test", "rule.1028372010.lifecycle.#", "1"), + testAccCheckAwsBackupPlanExists(resourceName, &plan, &ruleNameMap), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "rule_name", rName), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, 
rName, "lifecycle.#", "1"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "lifecycle.0.cold_storage_after", "30"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "lifecycle.0.delete_after", "180"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "recovery_point_tags.%", "0"), ), }, - }, - }) -} - -func TestAccAwsBackupPlan_withLifecycleDeleteAfterOnly(t *testing.T) { - var plan backup.GetBackupPlanOutput - rStr := "lifecycle_policy_two" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSBackup(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAwsBackupPlanDestroy, - Steps: []resource.TestStep{ { - Config: testAccBackupPlanWithLifecycleDeleteAfterOnly(rStr), + Config: testAccAwsBackupPlanConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAwsBackupPlanExists("aws_backup_plan.test", &plan), - resource.TestCheckResourceAttr("aws_backup_plan.test", "rule.2156287050.lifecycle.#", "1"), - resource.TestCheckResourceAttr("aws_backup_plan.test", "rule.2156287050.lifecycle.0.delete_after", "7"), - resource.TestCheckResourceAttr("aws_backup_plan.test", "rule.2156287050.lifecycle.0.cold_storage_after", "0"), + testAccCheckAwsBackupPlanExists(resourceName, &plan, &ruleNameMap), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "rule_name", rName), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "lifecycle.#", "0"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "recovery_point_tags.%", "0"), ), }, }, }) } -func TestAccAwsBackupPlan_withLifecycleColdStorageAfterOnly(t *testing.T) { +func TestAccAwsBackupPlan_withRecoveryPointTags(t *testing.T) { var plan backup.GetBackupPlanOutput - rStr := "lifecycle_policy_three" + 
ruleNameMap := map[string]string{} + resourceName := "aws_backup_plan.test" + rName := fmt.Sprintf("tf-testacc-backup-%s", acctest.RandStringFromCharSet(14, acctest.CharSetAlphaNum)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSBackup(t) }, @@ -199,12 +243,51 @@ func TestAccAwsBackupPlan_withLifecycleColdStorageAfterOnly(t *testing.T) { CheckDestroy: testAccCheckAwsBackupPlanDestroy, Steps: []resource.TestStep{ { - Config: testAccBackupPlanWithLifecycleColdStorageAfterOnly(rStr), + Config: testAccAwsBackupPlanConfig_recoveryPointTags(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsBackupPlanExists(resourceName, &plan, &ruleNameMap), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "rule_name", rName), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "target_vault_name", rName), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "schedule", "cron(0 12 * * ? 
*)"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "lifecycle.#", "0"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "recovery_point_tags.%", "3"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "recovery_point_tags.Name", rName), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "recovery_point_tags.Key1", "Value1"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "recovery_point_tags.Key2", "Value2a"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + ), + }, + { + Config: testAccAwsBackupPlanConfig_recoveryPointTagsUpdated(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsBackupPlanExists(resourceName, &plan, &ruleNameMap), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "rule_name", rName), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "target_vault_name", rName), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "schedule", "cron(0 12 * * ? 
*)"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "lifecycle.#", "0"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "recovery_point_tags.%", "3"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "recovery_point_tags.Name", rName), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "recovery_point_tags.Key2", "Value2b"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "recovery_point_tags.Key3", "Value3"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + ), + }, + { + Config: testAccAwsBackupPlanConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAwsBackupPlanExists("aws_backup_plan.test", &plan), - resource.TestCheckResourceAttr("aws_backup_plan.test", "rule.1300859512.lifecycle.#", "1"), - resource.TestCheckResourceAttr("aws_backup_plan.test", "rule.1300859512.lifecycle.0.delete_after", "0"), - resource.TestCheckResourceAttr("aws_backup_plan.test", "rule.1300859512.lifecycle.0.cold_storage_after", "7"), + testAccCheckAwsBackupPlanExists(resourceName, &plan, &ruleNameMap), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "rule_name", rName), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "target_vault_name", rName), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "schedule", "cron(0 12 * * ? 
*)"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "lifecycle.#", "0"), + testAccCheckAwsBackupPlanRuleAttr(resourceName, &ruleNameMap, rName, "recovery_point_tags.%", "0"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), }, }, @@ -213,7 +296,9 @@ func TestAccAwsBackupPlan_withLifecycleColdStorageAfterOnly(t *testing.T) { func TestAccAwsBackupPlan_disappears(t *testing.T) { var plan backup.GetBackupPlanOutput - rInt := acctest.RandInt() + ruleNameMap := map[string]string{} + resourceName := "aws_backup_plan.test" + rName := fmt.Sprintf("tf-testacc-backup-%s", acctest.RandStringFromCharSet(14, acctest.CharSetAlphaNum)) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSBackup(t) }, @@ -221,9 +306,9 @@ func TestAccAwsBackupPlan_disappears(t *testing.T) { CheckDestroy: testAccCheckAwsBackupPlanDestroy, Steps: []resource.TestStep{ { - Config: testAccBackupPlanConfig(rInt), + Config: testAccAwsBackupPlanConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckAwsBackupPlanExists("aws_backup_plan.test", &plan), + testAccCheckAwsBackupPlanExists(resourceName, &plan, &ruleNameMap), testAccCheckAwsBackupPlanDisappears(&plan), ), ExpectNonEmptyPlan: true, @@ -269,186 +354,274 @@ func testAccCheckAwsBackupPlanDisappears(backupPlan *backup.GetBackupPlanOutput) } } -func testAccCheckAwsBackupPlanExists(name string, plan *backup.GetBackupPlanOutput) resource.TestCheckFunc { +func testAccCheckAwsBackupPlanExists(name string, plan *backup.GetBackupPlanOutput, ruleNameMap *map[string]string) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] + conn := testAccProvider.Meta().(*AWSClient).backupconn + rs, ok := s.RootModule().Resources[name] if !ok { return fmt.Errorf("Not found: %s", name) } - if rs.Primary.ID == "" { - return fmt.Errorf("Resource ID is not set") + return fmt.Errorf("No ID is set") } - conn := 
testAccProvider.Meta().(*AWSClient).backupconn - - input := &backup.GetBackupPlanInput{ + output, err := conn.GetBackupPlan(&backup.GetBackupPlanInput{ BackupPlanId: aws.String(rs.Primary.ID), - } - - output, err := conn.GetBackupPlan(input) - + }) if err != nil { return err } *plan = *output + // Build map of rule name to hash value. + re := regexp.MustCompile(`^rule\.(\d+)\.rule_name$`) + for k, v := range rs.Primary.Attributes { + matches := re.FindStringSubmatch(k) + if matches != nil { + (*ruleNameMap)[v] = matches[1] + } + } + return nil } } -func testAccBackupPlanConfig(randInt int) string { +func testAccCheckAwsBackupPlanRuleAttr(name string, ruleNameMap *map[string]string, ruleName, key, value string) resource.TestCheckFunc { + return func(s *terraform.State) error { + return resource.TestCheckResourceAttr(name, fmt.Sprintf("rule.%s.%s", (*ruleNameMap)[ruleName], key), value)(s) + } +} + +func testAccAwsBackupPlanConfig_basic(rName string) string { return fmt.Sprintf(` resource "aws_backup_vault" "test" { - name = "tf_acc_test_backup_vault_%[1]d" + name = %[1]q } resource "aws_backup_plan" "test" { - name = "tf_acc_test_backup_plan_%[1]d" + name = %[1]q rule { - rule_name = "tf_acc_test_backup_rule_%[1]d" + rule_name = %[1]q target_vault_name = "${aws_backup_vault.test.name}" schedule = "cron(0 12 * * ? *)" } } -`, randInt) +`, rName) } -func testAccBackupPlanWithTag(randInt int) string { +func testAccAwsBackupPlanConfig_tags(rName string) string { return fmt.Sprintf(` resource "aws_backup_vault" "test" { - name = "tf_acc_test_backup_vault_%[1]d" + name = %[1]q } resource "aws_backup_plan" "test" { - name = "tf_acc_test_backup_plan_%[1]d" + name = %[1]q rule { - rule_name = "tf_acc_test_backup_rule_%[1]d" + rule_name = %[1]q target_vault_name = "${aws_backup_vault.test.name}" schedule = "cron(0 12 * * ? 
*)" } tags = { - env = "test" + Name = %[1]q + Key1 = "Value1" + Key2 = "Value2a" } } -`, randInt) +`, rName) } -func testAccBackupPlanWithTags(randInt int) string { +func testAccAwsBackupPlanConfig_tagsUpdated(rName string) string { return fmt.Sprintf(` resource "aws_backup_vault" "test" { - name = "tf_acc_test_backup_vault_%[1]d" + name = %[1]q } resource "aws_backup_plan" "test" { - name = "tf_acc_test_backup_plan_%[1]d" + name = %[1]q rule { - rule_name = "tf_acc_test_backup_rule_%[1]d" + rule_name = %[1]q target_vault_name = "${aws_backup_vault.test.name}" schedule = "cron(0 12 * * ? *)" } tags = { - env = "test" - app = "widget" + Name = %[1]q + Key2 = "Value2b" + Key3 = "Value3" + } +} +`, rName) +} + +func testAccAwsBackupPlanConfig_twoRules(rName string) string { + return fmt.Sprintf(` +resource "aws_backup_vault" "test" { + name = %[1]q +} + +resource "aws_backup_plan" "test" { + name = %[1]q + + rule { + rule_name = "%[1]s_1" + target_vault_name = "${aws_backup_vault.test.name}" + schedule = "cron(0 12 * * ? *)" + } + rule { + rule_name = "%[1]s_2" + target_vault_name = "${aws_backup_vault.test.name}" + schedule = "cron(0 6 * * ? *)" + } +} +`, rName) +} + +func testAccAwsBackupPlanConfig_threeRules(rName string) string { + return fmt.Sprintf(` +resource "aws_backup_vault" "test" { + name = %[1]q +} + +resource "aws_backup_plan" "test" { + name = %[1]q + + rule { + rule_name = "%[1]s_1" + target_vault_name = "${aws_backup_vault.test.name}" + schedule = "cron(0 6 * * ? *)" + } + rule { + rule_name = "%[1]s_2" + target_vault_name = "${aws_backup_vault.test.name}" + schedule = "cron(0 12 * * ? *)" + } + rule { + rule_name = "%[1]s_3" + target_vault_name = "${aws_backup_vault.test.name}" + schedule = "cron(0 18 * * ? 
*)" } } -`, randInt) +`, rName) } -func testAccBackupPlanWithLifecycle(stringID string) string { +func testAccAwsBackupPlanConfig_lifecycleColdStorageAfterOnly(rName string) string { return fmt.Sprintf(` resource "aws_backup_vault" "test" { - name = "tf_acc_test_backup_vault_%[1]s" + name = %[1]q } resource "aws_backup_plan" "test" { - name = "tf_acc_test_backup_plan_%[1]s" + name = %[1]q rule { - rule_name = "tf_acc_test_backup_rule_%[1]s" + rule_name = %[1]q target_vault_name = "${aws_backup_vault.test.name}" schedule = "cron(0 12 * * ? *)" lifecycle { - cold_storage_after = 30 - delete_after = 160 + cold_storage_after = 7 } } } -`, stringID) +`, rName) } -func testAccBackupPlanWithLifecycleDeleteAfterOnly(stringID string) string { +func testAccAwsBackupPlanConfig_lifecycleDeleteAfterOnly(rName string) string { return fmt.Sprintf(` resource "aws_backup_vault" "test" { - name = "tf_acc_test_backup_vault_%[1]s" + name = %[1]q } resource "aws_backup_plan" "test" { - name = "tf_acc_test_backup_plan_%[1]s" + name = %[1]q rule { - rule_name = "tf_acc_test_backup_rule_%[1]s" + rule_name = %[1]q target_vault_name = "${aws_backup_vault.test.name}" schedule = "cron(0 12 * * ? *)" lifecycle { - delete_after = "7" + delete_after = 120 } } } -`, stringID) +`, rName) } -func testAccBackupPlanWithLifecycleColdStorageAfterOnly(stringID string) string { +func testAccAwsBackupPlanConfig_lifecycleColdStorageAfterAndDeleteAfter(rName string) string { return fmt.Sprintf(` resource "aws_backup_vault" "test" { - name = "tf_acc_test_backup_vault_%[1]s" + name = %[1]q } resource "aws_backup_plan" "test" { - name = "tf_acc_test_backup_plan_%[1]s" + name = %[1]q rule { - rule_name = "tf_acc_test_backup_rule_%[1]s" + rule_name = %[1]q target_vault_name = "${aws_backup_vault.test.name}" schedule = "cron(0 12 * * ? 
*)" lifecycle { - cold_storage_after = "7" + cold_storage_after = 30 + delete_after = 180 } } } -`, stringID) +`, rName) } -func testAccBackupPlanWithRules(randInt int) string { +func testAccAwsBackupPlanConfig_recoveryPointTags(rName string) string { return fmt.Sprintf(` resource "aws_backup_vault" "test" { - name = "tf_acc_test_backup_vault_%[1]d" + name = %[1]q } resource "aws_backup_plan" "test" { - name = "tf_acc_test_backup_plan_%[1]d" + name = %[1]q rule { - rule_name = "tf_acc_test_backup_rule_%[1]d" + rule_name = %[1]q target_vault_name = "${aws_backup_vault.test.name}" schedule = "cron(0 12 * * ? *)" + + recovery_point_tags = { + Name = %[1]q + Key1 = "Value1" + Key2 = "Value2a" + } } +} +`, rName) +} + +func testAccAwsBackupPlanConfig_recoveryPointTagsUpdated(rName string) string { + return fmt.Sprintf(` +resource "aws_backup_vault" "test" { + name = %[1]q +} + +resource "aws_backup_plan" "test" { + name = %[1]q rule { - rule_name = "tf_acc_test_backup_rule_%[1]d_2" + rule_name = %[1]q target_vault_name = "${aws_backup_vault.test.name}" - schedule = "cron(0 6 * * ? *)" + schedule = "cron(0 12 * * ? 
*)" + + recovery_point_tags = { + Name = %[1]q + Key2 = "Value2b" + Key3 = "Value3" + } } } -`, randInt) +`, rName) } diff --git a/aws/resource_aws_cloud9_environment_ec2_test.go b/aws/resource_aws_cloud9_environment_ec2_test.go index 0399b78edba..3f24b571443 100644 --- a/aws/resource_aws_cloud9_environment_ec2_test.go +++ b/aws/resource_aws_cloud9_environment_ec2_test.go @@ -18,7 +18,6 @@ func TestAccAWSCloud9EnvironmentEc2_basic(t *testing.T) { rString := acctest.RandString(8) envName := fmt.Sprintf("tf_acc_env_basic_%s", rString) uEnvName := fmt.Sprintf("tf_acc_env_basic_updated_%s", rString) - resourceName := "aws_cloud9_environment_ec2.test" resource.ParallelTest(t, resource.TestCase{ @@ -36,6 +35,12 @@ func TestAccAWSCloud9EnvironmentEc2_basic(t *testing.T) { resource.TestMatchResourceAttr(resourceName, "owner_arn", regexp.MustCompile(`^arn:`)), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"instance_type"}, + }, { Config: testAccAWSCloud9EnvironmentEc2Config(uEnvName), Check: resource.ComposeTestCheckFunc( @@ -59,7 +64,6 @@ func TestAccAWSCloud9EnvironmentEc2_allFields(t *testing.T) { description := fmt.Sprintf("Tf Acc Test %s", rString) uDescription := fmt.Sprintf("Tf Acc Test Updated %s", rString) userName := fmt.Sprintf("tf_acc_cloud9_env_%s", rString) - resourceName := "aws_cloud9_environment_ec2.test" resource.ParallelTest(t, resource.TestCase{ @@ -78,6 +82,12 @@ func TestAccAWSCloud9EnvironmentEc2_allFields(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "type", "ec2"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"instance_type", "automatic_stop_time_minutes", "subnet_id"}, + }, { Config: testAccAWSCloud9EnvironmentEc2AllFieldsConfig(uEnvName, uDescription, userName), Check: resource.ComposeTestCheckFunc( @@ -93,30 +103,6 @@ func TestAccAWSCloud9EnvironmentEc2_allFields(t *testing.T) 
{ }) } -func TestAccAWSCloud9EnvironmentEc2_importBasic(t *testing.T) { - rString := acctest.RandString(8) - name := fmt.Sprintf("tf_acc_api_doc_part_import_%s", rString) - - resourceName := "aws_cloud9_environment_ec2.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t); testAccPreCheckAWSCloud9(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCloud9EnvironmentEc2Destroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSCloud9EnvironmentEc2Config(name), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"instance_type"}, - }, - }, - }) -} - func testAccCheckAWSCloud9EnvironmentEc2Exists(n string, res *cloud9.Environment) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] diff --git a/aws/resource_aws_cloudformation_stack_test.go b/aws/resource_aws_cloudformation_stack_test.go index bc0d3c52274..9431e13c8cb 100644 --- a/aws/resource_aws_cloudformation_stack_test.go +++ b/aws/resource_aws_cloudformation_stack_test.go @@ -11,10 +11,10 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/terraform" ) -func TestAccAWSCloudFormationStack_importBasic(t *testing.T) { +func TestAccAWSCloudFormationStack_basic(t *testing.T) { + var stack cloudformation.Stack stackName := fmt.Sprintf("tf-acc-test-basic-%s", acctest.RandString(10)) - - resourceName := "aws_cloudformation_stack.network" + resourceName := "aws_cloudformation_stack.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -23,6 +23,9 @@ func TestAccAWSCloudFormationStack_importBasic(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccAWSCloudFormationStackConfig(stackName), + Check: resource.ComposeTestCheckFunc( + testAccCheckCloudFormationStackExists(resourceName, &stack), + ), }, { ResourceName: resourceName, @@ -33,28 +36,10 @@ func 
TestAccAWSCloudFormationStack_importBasic(t *testing.T) { }) } -func TestAccAWSCloudFormationStack_basic(t *testing.T) { - var stack cloudformation.Stack - stackName := fmt.Sprintf("tf-acc-test-basic-%s", acctest.RandString(10)) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSCloudFormationDestroy, - Steps: []resource.TestStep{ - { - Config: testAccAWSCloudFormationStackConfig(stackName), - Check: resource.ComposeTestCheckFunc( - testAccCheckCloudFormationStackExists("aws_cloudformation_stack.network", &stack), - ), - }, - }, - }) -} - func TestAccAWSCloudFormationStack_disappears(t *testing.T) { var stack cloudformation.Stack stackName := fmt.Sprintf("tf-acc-test-basic-%s", acctest.RandString(10)) + resourceName := "aws_cloudformation_stack.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -64,7 +49,7 @@ func TestAccAWSCloudFormationStack_disappears(t *testing.T) { { Config: testAccAWSCloudFormationStackConfig(stackName), Check: resource.ComposeTestCheckFunc( - testAccCheckCloudFormationStackExists("aws_cloudformation_stack.network", &stack), + testAccCheckCloudFormationStackExists(resourceName, &stack), testAccCheckCloudFormationStackDisappears(&stack), ), ExpectNonEmptyPlan: true, @@ -76,6 +61,7 @@ func TestAccAWSCloudFormationStack_disappears(t *testing.T) { func TestAccAWSCloudFormationStack_yaml(t *testing.T) { var stack cloudformation.Stack stackName := fmt.Sprintf("tf-acc-test-yaml-%s", acctest.RandString(10)) + resourceName := "aws_cloudformation_stack.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -85,9 +71,14 @@ func TestAccAWSCloudFormationStack_yaml(t *testing.T) { { Config: testAccAWSCloudFormationStackConfig_yaml(stackName), Check: resource.ComposeTestCheckFunc( - testAccCheckCloudFormationStackExists("aws_cloudformation_stack.yaml", &stack), + 
testAccCheckCloudFormationStackExists(resourceName, &stack), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -95,6 +86,7 @@ func TestAccAWSCloudFormationStack_yaml(t *testing.T) { func TestAccAWSCloudFormationStack_defaultParams(t *testing.T) { var stack cloudformation.Stack stackName := fmt.Sprintf("tf-acc-test-default-params-%s", acctest.RandString(10)) + resourceName := "aws_cloudformation_stack.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -104,9 +96,15 @@ func TestAccAWSCloudFormationStack_defaultParams(t *testing.T) { { Config: testAccAWSCloudFormationStackConfig_defaultParams(stackName), Check: resource.ComposeTestCheckFunc( - testAccCheckCloudFormationStackExists("aws_cloudformation_stack.asg-demo", &stack), + testAccCheckCloudFormationStackExists(resourceName, &stack), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"parameters"}, + }, }, }) } @@ -114,8 +112,9 @@ func TestAccAWSCloudFormationStack_defaultParams(t *testing.T) { func TestAccAWSCloudFormationStack_allAttributes(t *testing.T) { var stack cloudformation.Stack stackName := fmt.Sprintf("tf-acc-test-all-attributes-%s", acctest.RandString(10)) - + resourceName := "aws_cloudformation_stack.test" expectedPolicyBody := "{\"Statement\":[{\"Action\":\"Update:*\",\"Effect\":\"Deny\",\"Principal\":\"*\",\"Resource\":\"LogicalResourceId/StaticVPC\"},{\"Action\":\"Update:*\",\"Effect\":\"Allow\",\"Principal\":\"*\",\"Resource\":\"*\"}]}" + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -124,37 +123,43 @@ func TestAccAWSCloudFormationStack_allAttributes(t *testing.T) { { Config: testAccAWSCloudFormationStackConfig_allAttributesWithBodies(stackName), Check: resource.ComposeTestCheckFunc( - testAccCheckCloudFormationStackExists("aws_cloudformation_stack.full", 
&stack), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "name", stackName), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "capabilities.#", "1"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "capabilities.1328347040", "CAPABILITY_IAM"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "disable_rollback", "false"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "notification_arns.#", "1"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "parameters.%", "1"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "parameters.VpcCIDR", "10.0.0.0/16"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "policy_body", expectedPolicyBody), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "tags.%", "2"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "tags.First", "Mickey"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "tags.Second", "Mouse"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "timeout_in_minutes", "10"), + testAccCheckCloudFormationStackExists(resourceName, &stack), + resource.TestCheckResourceAttr(resourceName, "name", stackName), + resource.TestCheckResourceAttr(resourceName, "capabilities.#", "1"), + resource.TestCheckResourceAttr(resourceName, "capabilities.1328347040", "CAPABILITY_IAM"), + resource.TestCheckResourceAttr(resourceName, "disable_rollback", "false"), + resource.TestCheckResourceAttr(resourceName, "notification_arns.#", "1"), + resource.TestCheckResourceAttr(resourceName, "parameters.%", "1"), + resource.TestCheckResourceAttr(resourceName, "parameters.VpcCIDR", "10.0.0.0/16"), + resource.TestCheckResourceAttr(resourceName, "policy_body", expectedPolicyBody), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.First", "Mickey"), + 
resource.TestCheckResourceAttr(resourceName, "tags.Second", "Mouse"), + resource.TestCheckResourceAttr(resourceName, "timeout_in_minutes", "10"), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_failure", "parameters", "policy_body"}, + }, { Config: testAccAWSCloudFormationStackConfig_allAttributesWithBodies_modified(stackName), Check: resource.ComposeTestCheckFunc( - testAccCheckCloudFormationStackExists("aws_cloudformation_stack.full", &stack), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "name", stackName), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "capabilities.#", "1"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "capabilities.1328347040", "CAPABILITY_IAM"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "disable_rollback", "false"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "notification_arns.#", "1"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "parameters.%", "1"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "parameters.VpcCIDR", "10.0.0.0/16"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "policy_body", expectedPolicyBody), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "tags.%", "2"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "tags.First", "Mickey"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "tags.Second", "Mouse"), - resource.TestCheckResourceAttr("aws_cloudformation_stack.full", "timeout_in_minutes", "10"), + testAccCheckCloudFormationStackExists(resourceName, &stack), + resource.TestCheckResourceAttr(resourceName, "name", stackName), + resource.TestCheckResourceAttr(resourceName, "capabilities.#", "1"), + resource.TestCheckResourceAttr(resourceName, "capabilities.1328347040", "CAPABILITY_IAM"), + 
resource.TestCheckResourceAttr(resourceName, "disable_rollback", "false"), + resource.TestCheckResourceAttr(resourceName, "notification_arns.#", "1"), + resource.TestCheckResourceAttr(resourceName, "parameters.%", "1"), + resource.TestCheckResourceAttr(resourceName, "parameters.VpcCIDR", "10.0.0.0/16"), + resource.TestCheckResourceAttr(resourceName, "policy_body", expectedPolicyBody), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.First", "Mickey"), + resource.TestCheckResourceAttr(resourceName, "tags.Second", "Mouse"), + resource.TestCheckResourceAttr(resourceName, "timeout_in_minutes", "10"), ), }, }, @@ -165,6 +170,7 @@ func TestAccAWSCloudFormationStack_allAttributes(t *testing.T) { func TestAccAWSCloudFormationStack_withParams(t *testing.T) { var stack cloudformation.Stack stackName := fmt.Sprintf("tf-acc-test-with-params-%s", acctest.RandString(10)) + resourceName := "aws_cloudformation_stack.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -174,13 +180,19 @@ func TestAccAWSCloudFormationStack_withParams(t *testing.T) { { Config: testAccAWSCloudFormationStackConfig_withParams(stackName), Check: resource.ComposeTestCheckFunc( - testAccCheckCloudFormationStackExists("aws_cloudformation_stack.with_params", &stack), + testAccCheckCloudFormationStackExists(resourceName, &stack), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_failure", "parameters"}, + }, { Config: testAccAWSCloudFormationStackConfig_withParams_modified(stackName), Check: resource.ComposeTestCheckFunc( - testAccCheckCloudFormationStackExists("aws_cloudformation_stack.with_params", &stack), + testAccCheckCloudFormationStackExists(resourceName, &stack), ), }, }, @@ -191,6 +203,7 @@ func TestAccAWSCloudFormationStack_withParams(t *testing.T) { func TestAccAWSCloudFormationStack_withUrl_withParams(t *testing.T) 
{ var stack cloudformation.Stack rName := fmt.Sprintf("tf-acc-test-with-url-and-params-%s", acctest.RandString(10)) + resourceName := "aws_cloudformation_stack.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -200,13 +213,19 @@ func TestAccAWSCloudFormationStack_withUrl_withParams(t *testing.T) { { Config: testAccAWSCloudFormationStackConfig_templateUrl_withParams(rName, "tf-cf-stack.json", "11.0.0.0/16"), Check: resource.ComposeTestCheckFunc( - testAccCheckCloudFormationStackExists("aws_cloudformation_stack.with-url-and-params", &stack), + testAccCheckCloudFormationStackExists(resourceName, &stack), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_failure", "parameters", "template_url"}, + }, { Config: testAccAWSCloudFormationStackConfig_templateUrl_withParams(rName, "tf-cf-stack.json", "13.0.0.0/16"), Check: resource.ComposeTestCheckFunc( - testAccCheckCloudFormationStackExists("aws_cloudformation_stack.with-url-and-params", &stack), + testAccCheckCloudFormationStackExists(resourceName, &stack), ), }, }, @@ -216,6 +235,7 @@ func TestAccAWSCloudFormationStack_withUrl_withParams(t *testing.T) { func TestAccAWSCloudFormationStack_withUrl_withParams_withYaml(t *testing.T) { var stack cloudformation.Stack rName := fmt.Sprintf("tf-acc-test-with-params-and-yaml-%s", acctest.RandString(10)) + resourceName := "aws_cloudformation_stack.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -223,11 +243,17 @@ func TestAccAWSCloudFormationStack_withUrl_withParams_withYaml(t *testing.T) { CheckDestroy: testAccCheckAWSCloudFormationDestroy, Steps: []resource.TestStep{ { - Config: testAccAWSCloudFormationStackConfig_templateUrl_withParams_withYaml(rName, "tf-cf-stack.yaml", "13.0.0.0/16"), + Config: testAccAWSCloudFormationStackConfig_templateUrl_withParams_withYaml(rName, "tf-cf-stack.test", "13.0.0.0/16"), Check: 
resource.ComposeTestCheckFunc( - testAccCheckCloudFormationStackExists("aws_cloudformation_stack.with-url-and-params-and-yaml", &stack), + testAccCheckCloudFormationStackExists(resourceName, &stack), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_failure", "parameters", "template_url"}, + }, }, }) } @@ -236,6 +262,7 @@ func TestAccAWSCloudFormationStack_withUrl_withParams_withYaml(t *testing.T) { func TestAccAWSCloudFormationStack_withUrl_withParams_noUpdate(t *testing.T) { var stack cloudformation.Stack rName := fmt.Sprintf("tf-acc-test-with-params-no-update-%s", acctest.RandString(10)) + resourceName := "aws_cloudformation_stack.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -245,13 +272,19 @@ func TestAccAWSCloudFormationStack_withUrl_withParams_noUpdate(t *testing.T) { { Config: testAccAWSCloudFormationStackConfig_templateUrl_withParams(rName, "tf-cf-stack-1.json", "11.0.0.0/16"), Check: resource.ComposeTestCheckFunc( - testAccCheckCloudFormationStackExists("aws_cloudformation_stack.with-url-and-params", &stack), + testAccCheckCloudFormationStackExists(resourceName, &stack), ), }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_failure", "parameters", "template_url"}, + }, { Config: testAccAWSCloudFormationStackConfig_templateUrl_withParams(rName, "tf-cf-stack-2.json", "11.0.0.0/16"), Check: resource.ComposeTestCheckFunc( - testAccCheckCloudFormationStackExists("aws_cloudformation_stack.with-url-and-params", &stack), + testAccCheckCloudFormationStackExists(resourceName, &stack), ), }, }, @@ -336,7 +369,7 @@ func testAccCheckCloudFormationStackDisappears(stack *cloudformation.Stack) reso func testAccAWSCloudFormationStackConfig(stackName string) string { return fmt.Sprintf(` -resource "aws_cloudformation_stack" "network" { +resource "aws_cloudformation_stack" 
"test" { name = "%[1]s" template_body = < 0 { + if v := os.Getenv(k); len(v) != 0 { *dst = v break } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go index 7b0a942e223..15fa647699f 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -73,7 +73,7 @@ type Session struct { // func is called instead of waiting to receive an error until a request is made. func New(cfgs ...*aws.Config) *Session { // load initial config from environment - envCfg := loadEnvConfig() + envCfg, envErr := loadEnvConfig() if envCfg.EnableSharedConfig { var cfg aws.Config @@ -93,17 +93,17 @@ func New(cfgs ...*aws.Config) *Session { // Session creation failed, need to report the error and prevent // any requests from succeeding. s = &Session{Config: defaults.Config()} - s.Config.MergeIn(cfgs...) - s.Config.Logger.Log("ERROR:", msg, "Error:", err) - s.Handlers.Validate.PushBack(func(r *request.Request) { - r.Error = err - }) + s.logDeprecatedNewSessionError(msg, err, cfgs) } return s } s := deprecatedNewSession(cfgs...) 
+ if envErr != nil { + msg := "failed to load env config" + s.logDeprecatedNewSessionError(msg, envErr, cfgs) + } if csmCfg, err := loadCSMConfig(envCfg, []string{}); err != nil { if l := s.Config.Logger; l != nil { @@ -112,11 +112,8 @@ func New(cfgs ...*aws.Config) *Session { } else if csmCfg.Enabled { err := enableCSM(&s.Handlers, csmCfg, s.Config.Logger) if err != nil { - err = fmt.Errorf("failed to enable CSM, %v", err) - s.Config.Logger.Log("ERROR:", err.Error()) - s.Handlers.Validate.PushBack(func(r *request.Request) { - r.Error = err - }) + msg := "failed to enable CSM" + s.logDeprecatedNewSessionError(msg, err, cfgs) } } @@ -279,10 +276,17 @@ type Options struct { // })) func NewSessionWithOptions(opts Options) (*Session, error) { var envCfg envConfig + var err error if opts.SharedConfigState == SharedConfigEnable { - envCfg = loadSharedEnvConfig() + envCfg, err = loadSharedEnvConfig() + if err != nil { + return nil, fmt.Errorf("failed to load shared config, %v", err) + } } else { - envCfg = loadEnvConfig() + envCfg, err = loadEnvConfig() + if err != nil { + return nil, fmt.Errorf("failed to load environment config, %v", err) + } } if len(opts.Profile) != 0 { @@ -550,6 +554,9 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, } } + // Regional Endpoint flag for STS endpoint resolving + mergeSTSRegionalEndpointConfig(cfg, envCfg, sharedCfg) + // Configure credentials if not already set by the user when creating the // Session. 
if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil { @@ -563,6 +570,22 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, return nil } +// mergeSTSRegionalEndpointConfig function merges the STSRegionalEndpoint into cfg from +// envConfig and SharedConfig with envConfig being given precedence over SharedConfig +func mergeSTSRegionalEndpointConfig(cfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig) error { + + cfg.STSRegionalEndpoint = envCfg.STSRegionalEndpoint + + if cfg.STSRegionalEndpoint == endpoints.UnsetSTSEndpoint { + cfg.STSRegionalEndpoint = sharedCfg.STSRegionalEndpoint + } + + if cfg.STSRegionalEndpoint == endpoints.UnsetSTSEndpoint { + cfg.STSRegionalEndpoint = endpoints.LegacySTSEndpoint + } + return nil +} + func initHandlers(s *Session) { // Add the Validate parameter handler if it is not disabled. s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler) @@ -591,37 +614,15 @@ func (s *Session) Copy(cfgs ...*aws.Config) *Session { // ClientConfig satisfies the client.ConfigProvider interface and is used to // configure the service client instances. Passing the Session to the service // client's constructor (New) will use this method to configure the client. -func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config { - // Backwards compatibility, the error will be eaten if user calls ClientConfig - // directly. All SDK services will use ClientconfigWithError. - cfg, _ := s.clientConfigWithErr(serviceName, cfgs...) - - return cfg -} - -func (s *Session) clientConfigWithErr(serviceName string, cfgs ...*aws.Config) (client.Config, error) { +func (s *Session) ClientConfig(service string, cfgs ...*aws.Config) client.Config { s = s.Copy(cfgs...) 
- var resolved endpoints.ResolvedEndpoint - var err error - region := aws.StringValue(s.Config.Region) - - if endpoint := aws.StringValue(s.Config.Endpoint); len(endpoint) != 0 { - resolved.URL = endpoints.AddScheme(endpoint, aws.BoolValue(s.Config.DisableSSL)) - resolved.SigningRegion = region - } else { - resolved, err = s.Config.EndpointResolver.EndpointFor( - serviceName, region, - func(opt *endpoints.Options) { - opt.DisableSSL = aws.BoolValue(s.Config.DisableSSL) - opt.UseDualStack = aws.BoolValue(s.Config.UseDualStack) - - // Support the condition where the service is modeled but its - // endpoint metadata is not available. - opt.ResolveUnknownService = true - }, - ) + resolved, err := s.resolveEndpoint(service, region, s.Config) + if err != nil && s.Config.Logger != nil { + s.Config.Logger.Log(fmt.Sprintf( + "ERROR: unable to resolve endpoint for service %q, region %q, err: %v", + service, region, err)) } return client.Config{ @@ -631,7 +632,37 @@ func (s *Session) clientConfigWithErr(serviceName string, cfgs ...*aws.Config) ( SigningRegion: resolved.SigningRegion, SigningNameDerived: resolved.SigningNameDerived, SigningName: resolved.SigningName, - }, err + } +} + +func (s *Session) resolveEndpoint(service, region string, cfg *aws.Config) (endpoints.ResolvedEndpoint, error) { + + if ep := aws.StringValue(cfg.Endpoint); len(ep) != 0 { + return endpoints.ResolvedEndpoint{ + URL: endpoints.AddScheme(ep, aws.BoolValue(cfg.DisableSSL)), + SigningRegion: region, + }, nil + } + + resolved, err := cfg.EndpointResolver.EndpointFor(service, region, + func(opt *endpoints.Options) { + opt.DisableSSL = aws.BoolValue(cfg.DisableSSL) + opt.UseDualStack = aws.BoolValue(cfg.UseDualStack) + // Support for STSRegionalEndpoint where the STSRegionalEndpoint is + // provided in envConfig or sharedConfig with envConfig getting + // precedence. 
+ opt.STSRegionalEndpoint = cfg.STSRegionalEndpoint + + // Support the condition where the service is modeled but its + // endpoint metadata is not available. + opt.ResolveUnknownService = true + }, + ) + if err != nil { + return endpoints.ResolvedEndpoint{}, err + } + + return resolved, nil } // ClientConfigNoResolveEndpoint is the same as ClientConfig with the exception @@ -641,12 +672,9 @@ func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Conf s = s.Copy(cfgs...) var resolved endpoints.ResolvedEndpoint - - region := aws.StringValue(s.Config.Region) - if ep := aws.StringValue(s.Config.Endpoint); len(ep) > 0 { resolved.URL = endpoints.AddScheme(ep, aws.BoolValue(s.Config.DisableSSL)) - resolved.SigningRegion = region + resolved.SigningRegion = aws.StringValue(s.Config.Region) } return client.Config{ @@ -658,3 +686,14 @@ func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Conf SigningName: resolved.SigningName, } } + +// logDeprecatedNewSessionError function enables error handling for session +func (s *Session) logDeprecatedNewSessionError(msg string, err error, cfgs []*aws.Config) { + // Session creation failed, need to report the error and prevent + // any requests from succeeding. + s.Config.MergeIn(cfgs...) 
+ s.Config.Logger.Log("ERROR:", msg, "Error:", err) + s.Handlers.Validate.PushBack(func(r *request.Request) { + r.Error = err + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go index d91ac93a544..8574668960b 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -5,6 +5,7 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/internal/ini" ) @@ -40,6 +41,9 @@ const ( // Web Identity Token File webIdentityTokenFileKey = `web_identity_token_file` // optional + // Additional config fields for regional or legacy endpoints + stsRegionalEndpointSharedKey = `sts_regional_endpoints` + // DefaultSharedConfigProfile is the default profile to be used when // loading configuration from the config files if another profile name // is not provided. 
@@ -82,12 +86,17 @@ type sharedConfig struct { // // endpoint_discovery_enabled = true EnableEndpointDiscovery *bool - // CSM Options CSMEnabled *bool CSMHost string CSMPort string CSMClientID string + + // Specifies the Regional Endpoint flag for the sdk to resolve the endpoint for a service + // + // sts_regional_endpoints = sts_regional_endpoint + // This can take value as `LegacySTSEndpoint` or `RegionalSTSEndpoint` + STSRegionalEndpoint endpoints.STSRegionalEndpoint } type sharedConfigFile struct { @@ -244,8 +253,16 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e updateString(&cfg.RoleSessionName, section, roleSessionNameKey) updateString(&cfg.SourceProfileName, section, sourceProfileKey) updateString(&cfg.CredentialSource, section, credentialSourceKey) - updateString(&cfg.Region, section, regionKey) + + if v := section.String(stsRegionalEndpointSharedKey); len(v) != 0 { + sre, err := endpoints.GetSTSRegionalEndpoint(v) + if err != nil { + return fmt.Errorf("failed to load %s from shared config, %s, %v", + stsRegionalEndpointKey, file.Filename, err) + } + cfg.STSRegionalEndpoint = sre + } } updateString(&cfg.CredentialProcess, section, credentialProcessKey) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 174bc4d013b..8d4da6459ad 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.25.10" +const SDKVersion = "1.25.22" diff --git a/vendor/github.com/aws/aws-sdk-go/service/acm/service.go b/vendor/github.com/aws/aws-sdk-go/service/acm/service.go index 9817d0c0a5a..ac0bee1943a 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/acm/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/acm/service.go @@ -46,11 +46,11 @@ const ( // svc := 
acm.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *ACM { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ACM { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ACM { svc := &ACM{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-12-08", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/acmpca/service.go b/vendor/github.com/aws/aws-sdk-go/service/acmpca/service.go index 6c231c1d700..c041442ae4d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/acmpca/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/acmpca/service.go @@ -46,11 +46,11 @@ const ( // svc := acmpca.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *ACMPCA { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ACMPCA { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ACMPCA { svc := &ACMPCA{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-08-22", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/amplify/service.go b/vendor/github.com/aws/aws-sdk-go/service/amplify/service.go index f7073bd9961..7d4fa8ea876 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/amplify/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/amplify/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *Amplify { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "amplify" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Amplify { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Amplify { svc := &Amplify{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-07-25", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/apigateway/service.go b/vendor/github.com/aws/aws-sdk-go/service/apigateway/service.go index 8064d24fc02..cf398a11402 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/apigateway/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/apigateway/service.go @@ -46,11 +46,11 @@ const ( // svc := apigateway.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *APIGateway { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *APIGateway { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *APIGateway { svc := &APIGateway{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-07-09", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/apigatewayv2/service.go b/vendor/github.com/aws/aws-sdk-go/service/apigatewayv2/service.go index 02f80c40830..75199eb30aa 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/apigatewayv2/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/apigatewayv2/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *ApiGatewayV2 { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "apigateway" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ApiGatewayV2 { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ApiGatewayV2 { svc := &ApiGatewayV2{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-11-29", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/service.go b/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/service.go index 902d81d426e..0872f66dde3 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *ApplicationAutoScaling { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "application-autoscaling" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ApplicationAutoScaling { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ApplicationAutoScaling { svc := &ApplicationAutoScaling{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2016-02-06", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/applicationinsights/service.go b/vendor/github.com/aws/aws-sdk-go/service/applicationinsights/service.go index 72b8030bd46..b90f92dc11f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/applicationinsights/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/applicationinsights/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *ApplicationInsights { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "applicationinsights" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ApplicationInsights { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ApplicationInsights { svc := &ApplicationInsights{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-11-25", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/appmesh/api.go b/vendor/github.com/aws/aws-sdk-go/service/appmesh/api.go index 59daa7c77e8..b9c5476838e 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/appmesh/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/appmesh/api.go @@ -3240,11 +3240,11 @@ func (c *AppMesh) UpdateVirtualServiceWithContext(ctx aws.Context, input *Update return out, req.Send() } -// An object representing the access logging information for a virtual node. +// An object that represents the access logging information for a virtual node. type AccessLog struct { _ struct{} `type:"structure"` - // An object representing an access log file. + // An object that represents an access log file. File *FileAccessLog `locationName:"file" type:"structure"` } @@ -3279,8 +3279,8 @@ func (s *AccessLog) SetFile(v *FileAccessLog) *AccessLog { return s } -// An object representing the AWS Cloud Map attribute information for your virtual -// node. +// An object that represents the AWS Cloud Map attribute information for your +// virtual node. type AwsCloudMapInstanceAttribute struct { _ struct{} `type:"structure"` @@ -3335,8 +3335,8 @@ func (s *AwsCloudMapInstanceAttribute) SetValue(v string) *AwsCloudMapInstanceAt return s } -// An object representing the AWS Cloud Map service discovery information for -// your virtual node. 
+// An object that represents the AWS Cloud Map service discovery information +// for your virtual node. type AwsCloudMapServiceDiscovery struct { _ struct{} `type:"structure"` @@ -3409,12 +3409,12 @@ func (s *AwsCloudMapServiceDiscovery) SetServiceName(v string) *AwsCloudMapServi return s } -// An object representing the backends that a virtual node is expected to send -// outbound traffic to. +// An object that represents the backends that a virtual node is expected to +// send outbound traffic to. type Backend struct { _ struct{} `type:"structure"` - // An object representing a virtual service backend for a virtual node. + // An object that represents a virtual service backend for a virtual node. VirtualService *VirtualServiceBackend `locationName:"virtualService" type:"structure"` } @@ -3457,7 +3457,7 @@ type CreateMeshInput struct { // MeshName is a required field MeshName *string `locationName:"meshName" min:"1" type:"string" required:"true"` - // An object representing the specification of a service mesh. + // An object that represents the specification of a service mesh. Spec *MeshSpec `locationName:"spec" type:"structure"` Tags []*TagRef `locationName:"tags" type:"list"` @@ -3531,7 +3531,7 @@ func (s *CreateMeshInput) SetTags(v []*TagRef) *CreateMeshInput { type CreateMeshOutput struct { _ struct{} `type:"structure" payload:"Mesh"` - // An object representing a service mesh returned by a describe operation. + // An object that represents a service mesh returned by a describe operation. // // Mesh is a required field Mesh *MeshData `locationName:"mesh" type:"structure" required:"true"` @@ -3564,7 +3564,7 @@ type CreateRouteInput struct { // RouteName is a required field RouteName *string `locationName:"routeName" min:"1" type:"string" required:"true"` - // An object representing the specification of a route. + // An object that represents a route specification. Specify one route type. 
// // Spec is a required field Spec *RouteSpec `locationName:"spec" type:"structure" required:"true"` @@ -3670,7 +3670,7 @@ func (s *CreateRouteInput) SetVirtualRouterName(v string) *CreateRouteInput { type CreateRouteOutput struct { _ struct{} `type:"structure" payload:"Route"` - // An object representing a route returned by a describe operation. + // An object that represents a route returned by a describe operation. // // Route is a required field Route *RouteData `locationName:"route" type:"structure" required:"true"` @@ -3700,7 +3700,7 @@ type CreateVirtualNodeInput struct { // MeshName is a required field MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"` - // An object representing the specification of a virtual node. + // An object that represents the specification of a virtual node. // // Spec is a required field Spec *VirtualNodeSpec `locationName:"spec" type:"structure" required:"true"` @@ -3794,7 +3794,7 @@ func (s *CreateVirtualNodeInput) SetVirtualNodeName(v string) *CreateVirtualNode type CreateVirtualNodeOutput struct { _ struct{} `type:"structure" payload:"VirtualNode"` - // An object representing a virtual node returned by a describe operation. + // An object that represents a virtual node returned by a describe operation. // // VirtualNode is a required field VirtualNode *VirtualNodeData `locationName:"virtualNode" type:"structure" required:"true"` @@ -3824,7 +3824,7 @@ type CreateVirtualRouterInput struct { // MeshName is a required field MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"` - // An object representing the specification of a virtual router. + // An object that represents the specification of a virtual router. 
// // Spec is a required field Spec *VirtualRouterSpec `locationName:"spec" type:"structure" required:"true"` @@ -3918,7 +3918,7 @@ func (s *CreateVirtualRouterInput) SetVirtualRouterName(v string) *CreateVirtual type CreateVirtualRouterOutput struct { _ struct{} `type:"structure" payload:"VirtualRouter"` - // An object representing a virtual router returned by a describe operation. + // An object that represents a virtual router returned by a describe operation. // // VirtualRouter is a required field VirtualRouter *VirtualRouterData `locationName:"virtualRouter" type:"structure" required:"true"` @@ -3948,7 +3948,7 @@ type CreateVirtualServiceInput struct { // MeshName is a required field MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"` - // An object representing the specification of a virtual service. + // An object that represents the specification of a virtual service. // // Spec is a required field Spec *VirtualServiceSpec `locationName:"spec" type:"structure" required:"true"` @@ -4039,7 +4039,7 @@ func (s *CreateVirtualServiceInput) SetVirtualServiceName(v string) *CreateVirtu type CreateVirtualServiceOutput struct { _ struct{} `type:"structure" payload:"VirtualService"` - // An object representing a virtual service returned by a describe operation. + // An object that represents a virtual service returned by a describe operation. // // VirtualService is a required field VirtualService *VirtualServiceData `locationName:"virtualService" type:"structure" required:"true"` @@ -4103,7 +4103,7 @@ func (s *DeleteMeshInput) SetMeshName(v string) *DeleteMeshInput { type DeleteMeshOutput struct { _ struct{} `type:"structure" payload:"Mesh"` - // An object representing a service mesh returned by a describe operation. + // An object that represents a service mesh returned by a describe operation. 
// // Mesh is a required field Mesh *MeshData `locationName:"mesh" type:"structure" required:"true"` @@ -4197,7 +4197,7 @@ func (s *DeleteRouteInput) SetVirtualRouterName(v string) *DeleteRouteInput { type DeleteRouteOutput struct { _ struct{} `type:"structure" payload:"Route"` - // An object representing a route returned by a describe operation. + // An object that represents a route returned by a describe operation. // // Route is a required field Route *RouteData `locationName:"route" type:"structure" required:"true"` @@ -4276,7 +4276,7 @@ func (s *DeleteVirtualNodeInput) SetVirtualNodeName(v string) *DeleteVirtualNode type DeleteVirtualNodeOutput struct { _ struct{} `type:"structure" payload:"VirtualNode"` - // An object representing a virtual node returned by a describe operation. + // An object that represents a virtual node returned by a describe operation. // // VirtualNode is a required field VirtualNode *VirtualNodeData `locationName:"virtualNode" type:"structure" required:"true"` @@ -4355,7 +4355,7 @@ func (s *DeleteVirtualRouterInput) SetVirtualRouterName(v string) *DeleteVirtual type DeleteVirtualRouterOutput struct { _ struct{} `type:"structure" payload:"VirtualRouter"` - // An object representing a virtual router returned by a describe operation. + // An object that represents a virtual router returned by a describe operation. // // VirtualRouter is a required field VirtualRouter *VirtualRouterData `locationName:"virtualRouter" type:"structure" required:"true"` @@ -4434,7 +4434,7 @@ func (s *DeleteVirtualServiceInput) SetVirtualServiceName(v string) *DeleteVirtu type DeleteVirtualServiceOutput struct { _ struct{} `type:"structure" payload:"VirtualService"` - // An object representing a virtual service returned by a describe operation. + // An object that represents a virtual service returned by a describe operation. 
// // VirtualService is a required field VirtualService *VirtualServiceData `locationName:"virtualService" type:"structure" required:"true"` @@ -4498,7 +4498,7 @@ func (s *DescribeMeshInput) SetMeshName(v string) *DescribeMeshInput { type DescribeMeshOutput struct { _ struct{} `type:"structure" payload:"Mesh"` - // An object representing a service mesh returned by a describe operation. + // An object that represents a service mesh returned by a describe operation. // // Mesh is a required field Mesh *MeshData `locationName:"mesh" type:"structure" required:"true"` @@ -4592,7 +4592,7 @@ func (s *DescribeRouteInput) SetVirtualRouterName(v string) *DescribeRouteInput type DescribeRouteOutput struct { _ struct{} `type:"structure" payload:"Route"` - // An object representing a route returned by a describe operation. + // An object that represents a route returned by a describe operation. // // Route is a required field Route *RouteData `locationName:"route" type:"structure" required:"true"` @@ -4671,7 +4671,7 @@ func (s *DescribeVirtualNodeInput) SetVirtualNodeName(v string) *DescribeVirtual type DescribeVirtualNodeOutput struct { _ struct{} `type:"structure" payload:"VirtualNode"` - // An object representing a virtual node returned by a describe operation. + // An object that represents a virtual node returned by a describe operation. // // VirtualNode is a required field VirtualNode *VirtualNodeData `locationName:"virtualNode" type:"structure" required:"true"` @@ -4750,7 +4750,7 @@ func (s *DescribeVirtualRouterInput) SetVirtualRouterName(v string) *DescribeVir type DescribeVirtualRouterOutput struct { _ struct{} `type:"structure" payload:"VirtualRouter"` - // An object representing a virtual router returned by a describe operation. + // An object that represents a virtual router returned by a describe operation. 
// // VirtualRouter is a required field VirtualRouter *VirtualRouterData `locationName:"virtualRouter" type:"structure" required:"true"` @@ -4829,7 +4829,7 @@ func (s *DescribeVirtualServiceInput) SetVirtualServiceName(v string) *DescribeV type DescribeVirtualServiceOutput struct { _ struct{} `type:"structure" payload:"VirtualService"` - // An object representing a virtual service returned by a describe operation. + // An object that represents a virtual service returned by a describe operation. // // VirtualService is a required field VirtualService *VirtualServiceData `locationName:"virtualService" type:"structure" required:"true"` @@ -4851,8 +4851,8 @@ func (s *DescribeVirtualServiceOutput) SetVirtualService(v *VirtualServiceData) return s } -// An object representing the DNS service discovery information for your virtual -// node. +// An object that represents the DNS service discovery information for your +// virtual node. type DnsServiceDiscovery struct { _ struct{} `type:"structure"` @@ -4889,7 +4889,7 @@ func (s *DnsServiceDiscovery) SetHostname(v string) *DnsServiceDiscovery { return s } -// An object representing the duration between retry attempts. +// An object that represents a duration of time. type Duration struct { _ struct{} `type:"structure"` @@ -4920,7 +4920,7 @@ func (s *Duration) SetValue(v int64) *Duration { return s } -// An object representing the egress filter rules for a service mesh. +// An object that represents the egress filter rules for a service mesh. type EgressFilter struct { _ struct{} `type:"structure"` @@ -4957,7 +4957,7 @@ func (s *EgressFilter) SetType(v string) *EgressFilter { return s } -// An object representing an access log file. +// An object that represents an access log file. 
type FileAccessLog struct { _ struct{} `type:"structure"` @@ -4997,8 +4997,436 @@ func (s *FileAccessLog) SetPath(v string) *FileAccessLog { return s } -// An object representing the method and value to match the header value sent -// with a request. Specify one match method. +// An object that represents a retry policy. Specify at least one value for +// at least one of the types of RetryEvents, a value for maxRetries, and a value +// for perRetryTimeout. +type GrpcRetryPolicy struct { + _ struct{} `type:"structure"` + + GrpcRetryEvents []*string `locationName:"grpcRetryEvents" min:"1" type:"list"` + + HttpRetryEvents []*string `locationName:"httpRetryEvents" min:"1" type:"list"` + + // MaxRetries is a required field + MaxRetries *int64 `locationName:"maxRetries" type:"long" required:"true"` + + // An object that represents a duration of time. + // + // PerRetryTimeout is a required field + PerRetryTimeout *Duration `locationName:"perRetryTimeout" type:"structure" required:"true"` + + TcpRetryEvents []*string `locationName:"tcpRetryEvents" min:"1" type:"list"` +} + +// String returns the string representation +func (s GrpcRetryPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GrpcRetryPolicy) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GrpcRetryPolicy) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GrpcRetryPolicy"} + if s.GrpcRetryEvents != nil && len(s.GrpcRetryEvents) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GrpcRetryEvents", 1)) + } + if s.HttpRetryEvents != nil && len(s.HttpRetryEvents) < 1 { + invalidParams.Add(request.NewErrParamMinLen("HttpRetryEvents", 1)) + } + if s.MaxRetries == nil { + invalidParams.Add(request.NewErrParamRequired("MaxRetries")) + } + if s.PerRetryTimeout == nil { + invalidParams.Add(request.NewErrParamRequired("PerRetryTimeout")) + } + if s.TcpRetryEvents != nil && len(s.TcpRetryEvents) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TcpRetryEvents", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGrpcRetryEvents sets the GrpcRetryEvents field's value. +func (s *GrpcRetryPolicy) SetGrpcRetryEvents(v []*string) *GrpcRetryPolicy { + s.GrpcRetryEvents = v + return s +} + +// SetHttpRetryEvents sets the HttpRetryEvents field's value. +func (s *GrpcRetryPolicy) SetHttpRetryEvents(v []*string) *GrpcRetryPolicy { + s.HttpRetryEvents = v + return s +} + +// SetMaxRetries sets the MaxRetries field's value. +func (s *GrpcRetryPolicy) SetMaxRetries(v int64) *GrpcRetryPolicy { + s.MaxRetries = &v + return s +} + +// SetPerRetryTimeout sets the PerRetryTimeout field's value. +func (s *GrpcRetryPolicy) SetPerRetryTimeout(v *Duration) *GrpcRetryPolicy { + s.PerRetryTimeout = v + return s +} + +// SetTcpRetryEvents sets the TcpRetryEvents field's value. +func (s *GrpcRetryPolicy) SetTcpRetryEvents(v []*string) *GrpcRetryPolicy { + s.TcpRetryEvents = v + return s +} + +// An object that represents a GRPC route type. +type GrpcRoute struct { + _ struct{} `type:"structure"` + + // An object that represents the action to take if a match is determined. 
+ // + // Action is a required field + Action *GrpcRouteAction `locationName:"action" type:"structure" required:"true"` + + // An object that represents the criteria for determining a request match. + // + // Match is a required field + Match *GrpcRouteMatch `locationName:"match" type:"structure" required:"true"` + + // An object that represents a retry policy. Specify at least one value for + // at least one of the types of RetryEvents, a value for maxRetries, and a value + // for perRetryTimeout. + RetryPolicy *GrpcRetryPolicy `locationName:"retryPolicy" type:"structure"` +} + +// String returns the string representation +func (s GrpcRoute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GrpcRoute) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GrpcRoute) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GrpcRoute"} + if s.Action == nil { + invalidParams.Add(request.NewErrParamRequired("Action")) + } + if s.Match == nil { + invalidParams.Add(request.NewErrParamRequired("Match")) + } + if s.Action != nil { + if err := s.Action.Validate(); err != nil { + invalidParams.AddNested("Action", err.(request.ErrInvalidParams)) + } + } + if s.Match != nil { + if err := s.Match.Validate(); err != nil { + invalidParams.AddNested("Match", err.(request.ErrInvalidParams)) + } + } + if s.RetryPolicy != nil { + if err := s.RetryPolicy.Validate(); err != nil { + invalidParams.AddNested("RetryPolicy", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAction sets the Action field's value. +func (s *GrpcRoute) SetAction(v *GrpcRouteAction) *GrpcRoute { + s.Action = v + return s +} + +// SetMatch sets the Match field's value. 
+func (s *GrpcRoute) SetMatch(v *GrpcRouteMatch) *GrpcRoute { + s.Match = v + return s +} + +// SetRetryPolicy sets the RetryPolicy field's value. +func (s *GrpcRoute) SetRetryPolicy(v *GrpcRetryPolicy) *GrpcRoute { + s.RetryPolicy = v + return s +} + +// An object that represents the action to take if a match is determined. +type GrpcRouteAction struct { + _ struct{} `type:"structure"` + + // WeightedTargets is a required field + WeightedTargets []*WeightedTarget `locationName:"weightedTargets" min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s GrpcRouteAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GrpcRouteAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GrpcRouteAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GrpcRouteAction"} + if s.WeightedTargets == nil { + invalidParams.Add(request.NewErrParamRequired("WeightedTargets")) + } + if s.WeightedTargets != nil && len(s.WeightedTargets) < 1 { + invalidParams.Add(request.NewErrParamMinLen("WeightedTargets", 1)) + } + if s.WeightedTargets != nil { + for i, v := range s.WeightedTargets { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "WeightedTargets", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetWeightedTargets sets the WeightedTargets field's value. +func (s *GrpcRouteAction) SetWeightedTargets(v []*WeightedTarget) *GrpcRouteAction { + s.WeightedTargets = v + return s +} + +// An object that represents the criteria for determining a request match. 
+type GrpcRouteMatch struct { + _ struct{} `type:"structure"` + + Metadata []*GrpcRouteMetadata `locationName:"metadata" min:"1" type:"list"` + + MethodName *string `locationName:"methodName" min:"1" type:"string"` + + ServiceName *string `locationName:"serviceName" type:"string"` +} + +// String returns the string representation +func (s GrpcRouteMatch) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GrpcRouteMatch) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GrpcRouteMatch) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GrpcRouteMatch"} + if s.Metadata != nil && len(s.Metadata) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Metadata", 1)) + } + if s.MethodName != nil && len(*s.MethodName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MethodName", 1)) + } + if s.Metadata != nil { + for i, v := range s.Metadata { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Metadata", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMetadata sets the Metadata field's value. +func (s *GrpcRouteMatch) SetMetadata(v []*GrpcRouteMetadata) *GrpcRouteMatch { + s.Metadata = v + return s +} + +// SetMethodName sets the MethodName field's value. +func (s *GrpcRouteMatch) SetMethodName(v string) *GrpcRouteMatch { + s.MethodName = &v + return s +} + +// SetServiceName sets the ServiceName field's value. +func (s *GrpcRouteMatch) SetServiceName(v string) *GrpcRouteMatch { + s.ServiceName = &v + return s +} + +// An object that represents the match metadata for the route. +type GrpcRouteMetadata struct { + _ struct{} `type:"structure"` + + Invert *bool `locationName:"invert" type:"boolean"` + + // An object that represents the match method. 
Specify one of the match values. + Match *GrpcRouteMetadataMatchMethod `locationName:"match" type:"structure"` + + // Name is a required field + Name *string `locationName:"name" min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GrpcRouteMetadata) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GrpcRouteMetadata) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GrpcRouteMetadata) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GrpcRouteMetadata"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.Match != nil { + if err := s.Match.Validate(); err != nil { + invalidParams.AddNested("Match", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInvert sets the Invert field's value. +func (s *GrpcRouteMetadata) SetInvert(v bool) *GrpcRouteMetadata { + s.Invert = &v + return s +} + +// SetMatch sets the Match field's value. +func (s *GrpcRouteMetadata) SetMatch(v *GrpcRouteMetadataMatchMethod) *GrpcRouteMetadata { + s.Match = v + return s +} + +// SetName sets the Name field's value. +func (s *GrpcRouteMetadata) SetName(v string) *GrpcRouteMetadata { + s.Name = &v + return s +} + +// An object that represents the match method. Specify one of the match values. +type GrpcRouteMetadataMatchMethod struct { + _ struct{} `type:"structure"` + + Exact *string `locationName:"exact" min:"1" type:"string"` + + Prefix *string `locationName:"prefix" min:"1" type:"string"` + + // An object that represents the range of values to match on. The first character + // of the range is included in the range, though the last character is not. 
+ // For example, if the range specified were 1-100, only values 1-99 would be + // matched. + Range *MatchRange `locationName:"range" type:"structure"` + + Regex *string `locationName:"regex" min:"1" type:"string"` + + Suffix *string `locationName:"suffix" min:"1" type:"string"` +} + +// String returns the string representation +func (s GrpcRouteMetadataMatchMethod) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GrpcRouteMetadataMatchMethod) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GrpcRouteMetadataMatchMethod) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GrpcRouteMetadataMatchMethod"} + if s.Exact != nil && len(*s.Exact) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Exact", 1)) + } + if s.Prefix != nil && len(*s.Prefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Prefix", 1)) + } + if s.Regex != nil && len(*s.Regex) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Regex", 1)) + } + if s.Suffix != nil && len(*s.Suffix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Suffix", 1)) + } + if s.Range != nil { + if err := s.Range.Validate(); err != nil { + invalidParams.AddNested("Range", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetExact sets the Exact field's value. +func (s *GrpcRouteMetadataMatchMethod) SetExact(v string) *GrpcRouteMetadataMatchMethod { + s.Exact = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *GrpcRouteMetadataMatchMethod) SetPrefix(v string) *GrpcRouteMetadataMatchMethod { + s.Prefix = &v + return s +} + +// SetRange sets the Range field's value. +func (s *GrpcRouteMetadataMatchMethod) SetRange(v *MatchRange) *GrpcRouteMetadataMatchMethod { + s.Range = v + return s +} + +// SetRegex sets the Regex field's value. 
+func (s *GrpcRouteMetadataMatchMethod) SetRegex(v string) *GrpcRouteMetadataMatchMethod { + s.Regex = &v + return s +} + +// SetSuffix sets the Suffix field's value. +func (s *GrpcRouteMetadataMatchMethod) SetSuffix(v string) *GrpcRouteMetadataMatchMethod { + s.Suffix = &v + return s +} + +// An object that represents the method and value to match with the header value +// sent in a request. Specify one match method. type HeaderMatchMethod struct { _ struct{} `type:"structure"` @@ -5006,9 +5434,10 @@ type HeaderMatchMethod struct { Prefix *string `locationName:"prefix" min:"1" type:"string"` - // The range of values to match on. The first character of the range is included - // in the range, though the last character is not. For example, if the range - // specified were 1-100, only values 1-99 would be matched. + // An object that represents the range of values to match on. The first character + // of the range is included in the range, though the last character is not. + // For example, if the range specified were 1-100, only values 1-99 would be + // matched. Range *MatchRange `locationName:"range" type:"structure"` Regex *string `locationName:"regex" min:"1" type:"string"` @@ -5083,7 +5512,7 @@ func (s *HeaderMatchMethod) SetSuffix(v string) *HeaderMatchMethod { return s } -// An object representing the health check policy for a virtual node's listener. +// An object that represents the health check policy for a virtual node's listener. type HealthCheckPolicy struct { _ struct{} `type:"structure"` @@ -5199,7 +5628,9 @@ func (s *HealthCheckPolicy) SetUnhealthyThreshold(v int64) *HealthCheckPolicy { return s } -// An object that represents a retry policy. +// An object that represents a retry policy. Specify at least one value for +// at least one of the types of RetryEvents, a value for maxRetries, and a value +// for perRetryTimeout. 
type HttpRetryPolicy struct { _ struct{} `type:"structure"` @@ -5208,7 +5639,7 @@ type HttpRetryPolicy struct { // MaxRetries is a required field MaxRetries *int64 `locationName:"maxRetries" type:"long" required:"true"` - // An object representing the duration between retry attempts. + // An object that represents a duration of time. // // PerRetryTimeout is a required field PerRetryTimeout *Duration `locationName:"perRetryTimeout" type:"structure" required:"true"` @@ -5272,23 +5703,24 @@ func (s *HttpRetryPolicy) SetTcpRetryEvents(v []*string) *HttpRetryPolicy { return s } -// An object representing the HTTP routing specification for a route. +// An object that represents an HTTP or HTTP2 route type. type HttpRoute struct { _ struct{} `type:"structure"` - // An object representing the traffic distribution requirements for matched - // HTTP requests. + // An object that represents the action to take if a match is determined. // // Action is a required field Action *HttpRouteAction `locationName:"action" type:"structure" required:"true"` - // An object representing the requirements for a route to match HTTP requests + // An object that represents the requirements for a route to match HTTP requests // for a virtual router. // // Match is a required field Match *HttpRouteMatch `locationName:"match" type:"structure" required:"true"` - // An object that represents a retry policy. + // An object that represents a retry policy. Specify at least one value for + // at least one of the types of RetryEvents, a value for maxRetries, and a value + // for perRetryTimeout. RetryPolicy *HttpRetryPolicy `locationName:"retryPolicy" type:"structure"` } @@ -5351,8 +5783,7 @@ func (s *HttpRoute) SetRetryPolicy(v *HttpRetryPolicy) *HttpRoute { return s } -// An object representing the traffic distribution requirements for matched -// HTTP requests. +// An object that represents the action to take if a match is determined. 
type HttpRouteAction struct { _ struct{} `type:"structure"` @@ -5402,14 +5833,14 @@ func (s *HttpRouteAction) SetWeightedTargets(v []*WeightedTarget) *HttpRouteActi return s } -// An object representing the HTTP header in the request. +// An object that represents the HTTP header in the request. type HttpRouteHeader struct { _ struct{} `type:"structure"` Invert *bool `locationName:"invert" type:"boolean"` - // An object representing the method and value to match the header value sent - // with a request. Specify one match method. + // An object that represents the method and value to match with the header value + // sent in a request. Specify one match method. Match *HeaderMatchMethod `locationName:"match" type:"structure"` // Name is a required field @@ -5465,7 +5896,7 @@ func (s *HttpRouteHeader) SetName(v string) *HttpRouteHeader { return s } -// An object representing the requirements for a route to match HTTP requests +// An object that represents the requirements for a route to match HTTP requests // for a virtual router. type HttpRouteMatch struct { _ struct{} `type:"structure"` @@ -6071,14 +6502,14 @@ func (s *ListVirtualServicesOutput) SetVirtualServices(v []*VirtualServiceRef) * return s } -// An object representing a listener for a virtual node. +// An object that represents a listener for a virtual node. type Listener struct { _ struct{} `type:"structure"` - // An object representing the health check policy for a virtual node's listener. + // An object that represents the health check policy for a virtual node's listener. HealthCheck *HealthCheckPolicy `locationName:"healthCheck" type:"structure"` - // An object representing a virtual node or virtual router listener port mapping. + // An object that represents a port mapping. 
// // PortMapping is a required field PortMapping *PortMapping `locationName:"portMapping" type:"structure" required:"true"` @@ -6129,11 +6560,11 @@ func (s *Listener) SetPortMapping(v *PortMapping) *Listener { return s } -// An object representing the logging information for a virtual node. +// An object that represents the logging information for a virtual node. type Logging struct { _ struct{} `type:"structure"` - // An object representing the access logging information for a virtual node. + // An object that represents the access logging information for a virtual node. AccessLog *AccessLog `locationName:"accessLog" type:"structure"` } @@ -6168,9 +6599,10 @@ func (s *Logging) SetAccessLog(v *AccessLog) *Logging { return s } -// The range of values to match on. The first character of the range is included -// in the range, though the last character is not. For example, if the range -// specified were 1-100, only values 1-99 would be matched. +// An object that represents the range of values to match on. The first character +// of the range is included in the range, though the last character is not. +// For example, if the range specified were 1-100, only values 1-99 would be +// matched. type MatchRange struct { _ struct{} `type:"structure"` @@ -6219,24 +6651,24 @@ func (s *MatchRange) SetStart(v int64) *MatchRange { return s } -// An object representing a service mesh returned by a describe operation. +// An object that represents a service mesh returned by a describe operation. type MeshData struct { _ struct{} `type:"structure"` // MeshName is a required field MeshName *string `locationName:"meshName" min:"1" type:"string" required:"true"` - // An object representing metadata for a resource. + // An object that represents metadata for a resource. // // Metadata is a required field Metadata *ResourceMetadata `locationName:"metadata" type:"structure" required:"true"` - // An object representing the specification of a service mesh. 
+ // An object that represents the specification of a service mesh. // // Spec is a required field Spec *MeshSpec `locationName:"spec" type:"structure" required:"true"` - // An object representing the status of a service mesh. + // An object that represents the status of a service mesh. // // Status is a required field Status *MeshStatus `locationName:"status" type:"structure" required:"true"` @@ -6276,7 +6708,7 @@ func (s *MeshData) SetStatus(v *MeshStatus) *MeshData { return s } -// An object representing a service mesh returned by a list operation. +// An object that represents a service mesh returned by a list operation. type MeshRef struct { _ struct{} `type:"structure"` @@ -6309,11 +6741,11 @@ func (s *MeshRef) SetMeshName(v string) *MeshRef { return s } -// An object representing the specification of a service mesh. +// An object that represents the specification of a service mesh. type MeshSpec struct { _ struct{} `type:"structure"` - // An object representing the egress filter rules for a service mesh. + // An object that represents the egress filter rules for a service mesh. EgressFilter *EgressFilter `locationName:"egressFilter" type:"structure"` } @@ -6348,7 +6780,7 @@ func (s *MeshSpec) SetEgressFilter(v *EgressFilter) *MeshSpec { return s } -// An object representing the status of a service mesh. +// An object that represents the status of a service mesh. type MeshStatus struct { _ struct{} `type:"structure"` @@ -6371,7 +6803,7 @@ func (s *MeshStatus) SetStatus(v string) *MeshStatus { return s } -// An object representing a virtual node or virtual router listener port mapping. +// An object that represents a port mapping. type PortMapping struct { _ struct{} `type:"structure"` @@ -6423,7 +6855,7 @@ func (s *PortMapping) SetProtocol(v string) *PortMapping { return s } -// An object representing metadata for a resource. +// An object that represents metadata for a resource. 
type ResourceMetadata struct { _ struct{} `type:"structure"` @@ -6483,14 +6915,14 @@ func (s *ResourceMetadata) SetVersion(v int64) *ResourceMetadata { return s } -// An object representing a route returned by a describe operation. +// An object that represents a route returned by a describe operation. type RouteData struct { _ struct{} `type:"structure"` // MeshName is a required field MeshName *string `locationName:"meshName" min:"1" type:"string" required:"true"` - // An object representing metadata for a resource. + // An object that represents metadata for a resource. // // Metadata is a required field Metadata *ResourceMetadata `locationName:"metadata" type:"structure" required:"true"` @@ -6498,12 +6930,12 @@ type RouteData struct { // RouteName is a required field RouteName *string `locationName:"routeName" min:"1" type:"string" required:"true"` - // An object representing the specification of a route. + // An object that represents a route specification. Specify one route type. // // Spec is a required field Spec *RouteSpec `locationName:"spec" type:"structure" required:"true"` - // An object representing the current status of a route. + // An object that represents the current status of a route. // // Status is a required field Status *RouteStatus `locationName:"status" type:"structure" required:"true"` @@ -6558,7 +6990,7 @@ func (s *RouteData) SetVirtualRouterName(v string) *RouteData { return s } -// An object representing a route returned by a list operation. +// An object that represents a route returned by a list operation. type RouteRef struct { _ struct{} `type:"structure"` @@ -6609,16 +7041,22 @@ func (s *RouteRef) SetVirtualRouterName(v string) *RouteRef { return s } -// An object representing the specification of a route. +// An object that represents a route specification. Specify one route type. type RouteSpec struct { _ struct{} `type:"structure"` - // An object representing the HTTP routing specification for a route. 
+ // An object that represents a GRPC route type. + GrpcRoute *GrpcRoute `locationName:"grpcRoute" type:"structure"` + + // An object that represents an HTTP or HTTP2 route type. + Http2Route *HttpRoute `locationName:"http2Route" type:"structure"` + + // An object that represents an HTTP or HTTP2 route type. HttpRoute *HttpRoute `locationName:"httpRoute" type:"structure"` Priority *int64 `locationName:"priority" type:"integer"` - // An object representing the TCP routing specification for a route. + // An object that represents a TCP route type. TcpRoute *TcpRoute `locationName:"tcpRoute" type:"structure"` } @@ -6635,6 +7073,16 @@ func (s RouteSpec) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *RouteSpec) Validate() error { invalidParams := request.ErrInvalidParams{Context: "RouteSpec"} + if s.GrpcRoute != nil { + if err := s.GrpcRoute.Validate(); err != nil { + invalidParams.AddNested("GrpcRoute", err.(request.ErrInvalidParams)) + } + } + if s.Http2Route != nil { + if err := s.Http2Route.Validate(); err != nil { + invalidParams.AddNested("Http2Route", err.(request.ErrInvalidParams)) + } + } if s.HttpRoute != nil { if err := s.HttpRoute.Validate(); err != nil { invalidParams.AddNested("HttpRoute", err.(request.ErrInvalidParams)) @@ -6652,6 +7100,18 @@ func (s *RouteSpec) Validate() error { return nil } +// SetGrpcRoute sets the GrpcRoute field's value. +func (s *RouteSpec) SetGrpcRoute(v *GrpcRoute) *RouteSpec { + s.GrpcRoute = v + return s +} + +// SetHttp2Route sets the Http2Route field's value. +func (s *RouteSpec) SetHttp2Route(v *HttpRoute) *RouteSpec { + s.Http2Route = v + return s +} + // SetHttpRoute sets the HttpRoute field's value. func (s *RouteSpec) SetHttpRoute(v *HttpRoute) *RouteSpec { s.HttpRoute = v @@ -6670,7 +7130,7 @@ func (s *RouteSpec) SetTcpRoute(v *TcpRoute) *RouteSpec { return s } -// An object representing the current status of a route. 
+// An object that represents the current status of a route. type RouteStatus struct { _ struct{} `type:"structure"` @@ -6694,16 +7154,17 @@ func (s *RouteStatus) SetStatus(v string) *RouteStatus { return s } -// An object representing the service discovery information for a virtual node. +// An object that represents the service discovery information for a virtual +// node. type ServiceDiscovery struct { _ struct{} `type:"structure"` - // An object representing the AWS Cloud Map service discovery information for - // your virtual node. + // An object that represents the AWS Cloud Map service discovery information + // for your virtual node. AwsCloudMap *AwsCloudMapServiceDiscovery `locationName:"awsCloudMap" type:"structure"` - // An object representing the DNS service discovery information for your virtual - // node. + // An object that represents the DNS service discovery information for your + // virtual node. Dns *DnsServiceDiscovery `locationName:"dns" type:"structure"` } @@ -6872,12 +7333,11 @@ func (s TagResourceOutput) GoString() string { return s.String() } -// An object representing the TCP routing specification for a route. +// An object that represents a TCP route type. type TcpRoute struct { _ struct{} `type:"structure"` - // An object representing the traffic distribution requirements for matched - // TCP requests. + // An object that represents the action to take if a match is determined. // // Action is a required field Action *TcpRouteAction `locationName:"action" type:"structure" required:"true"` @@ -6917,8 +7377,7 @@ func (s *TcpRoute) SetAction(v *TcpRouteAction) *TcpRoute { return s } -// An object representing the traffic distribution requirements for matched -// TCP requests. +// An object that represents the action to take if a match is determined. 
type TcpRouteAction struct { _ struct{} `type:"structure"` @@ -7038,7 +7497,7 @@ type UpdateMeshInput struct { // MeshName is a required field MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"` - // An object representing the specification of a service mesh. + // An object that represents the specification of a service mesh. Spec *MeshSpec `locationName:"spec" type:"structure"` } @@ -7094,7 +7553,7 @@ func (s *UpdateMeshInput) SetSpec(v *MeshSpec) *UpdateMeshInput { type UpdateMeshOutput struct { _ struct{} `type:"structure" payload:"Mesh"` - // An object representing a service mesh returned by a describe operation. + // An object that represents a service mesh returned by a describe operation. // // Mesh is a required field Mesh *MeshData `locationName:"mesh" type:"structure" required:"true"` @@ -7127,7 +7586,7 @@ type UpdateRouteInput struct { // RouteName is a required field RouteName *string `location:"uri" locationName:"routeName" min:"1" type:"string" required:"true"` - // An object representing the specification of a route. + // An object that represents a route specification. Specify one route type. // // Spec is a required field Spec *RouteSpec `locationName:"spec" type:"structure" required:"true"` @@ -7215,7 +7674,7 @@ func (s *UpdateRouteInput) SetVirtualRouterName(v string) *UpdateRouteInput { type UpdateRouteOutput struct { _ struct{} `type:"structure" payload:"Route"` - // An object representing a route returned by a describe operation. + // An object that represents a route returned by a describe operation. // // Route is a required field Route *RouteData `locationName:"route" type:"structure" required:"true"` @@ -7245,7 +7704,7 @@ type UpdateVirtualNodeInput struct { // MeshName is a required field MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"` - // An object representing the specification of a virtual node. 
+ // An object that represents the specification of a virtual node. // // Spec is a required field Spec *VirtualNodeSpec `locationName:"spec" type:"structure" required:"true"` @@ -7321,7 +7780,7 @@ func (s *UpdateVirtualNodeInput) SetVirtualNodeName(v string) *UpdateVirtualNode type UpdateVirtualNodeOutput struct { _ struct{} `type:"structure" payload:"VirtualNode"` - // An object representing a virtual node returned by a describe operation. + // An object that represents a virtual node returned by a describe operation. // // VirtualNode is a required field VirtualNode *VirtualNodeData `locationName:"virtualNode" type:"structure" required:"true"` @@ -7351,7 +7810,7 @@ type UpdateVirtualRouterInput struct { // MeshName is a required field MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"` - // An object representing the specification of a virtual router. + // An object that represents the specification of a virtual router. // // Spec is a required field Spec *VirtualRouterSpec `locationName:"spec" type:"structure" required:"true"` @@ -7427,7 +7886,7 @@ func (s *UpdateVirtualRouterInput) SetVirtualRouterName(v string) *UpdateVirtual type UpdateVirtualRouterOutput struct { _ struct{} `type:"structure" payload:"VirtualRouter"` - // An object representing a virtual router returned by a describe operation. + // An object that represents a virtual router returned by a describe operation. // // VirtualRouter is a required field VirtualRouter *VirtualRouterData `locationName:"virtualRouter" type:"structure" required:"true"` @@ -7457,7 +7916,7 @@ type UpdateVirtualServiceInput struct { // MeshName is a required field MeshName *string `location:"uri" locationName:"meshName" min:"1" type:"string" required:"true"` - // An object representing the specification of a virtual service. + // An object that represents the specification of a virtual service. 
// // Spec is a required field Spec *VirtualServiceSpec `locationName:"spec" type:"structure" required:"true"` @@ -7533,7 +7992,7 @@ func (s *UpdateVirtualServiceInput) SetVirtualServiceName(v string) *UpdateVirtu type UpdateVirtualServiceOutput struct { _ struct{} `type:"structure" payload:"VirtualService"` - // An object representing a virtual service returned by a describe operation. + // An object that represents a virtual service returned by a describe operation. // // VirtualService is a required field VirtualService *VirtualServiceData `locationName:"virtualService" type:"structure" required:"true"` @@ -7555,24 +8014,24 @@ func (s *UpdateVirtualServiceOutput) SetVirtualService(v *VirtualServiceData) *U return s } -// An object representing a virtual node returned by a describe operation. +// An object that represents a virtual node returned by a describe operation. type VirtualNodeData struct { _ struct{} `type:"structure"` // MeshName is a required field MeshName *string `locationName:"meshName" min:"1" type:"string" required:"true"` - // An object representing metadata for a resource. + // An object that represents metadata for a resource. // // Metadata is a required field Metadata *ResourceMetadata `locationName:"metadata" type:"structure" required:"true"` - // An object representing the specification of a virtual node. + // An object that represents the specification of a virtual node. // // Spec is a required field Spec *VirtualNodeSpec `locationName:"spec" type:"structure" required:"true"` - // An object representing the current status of the virtual node. + // An object that represents the current status of the virtual node. // // Status is a required field Status *VirtualNodeStatus `locationName:"status" type:"structure" required:"true"` @@ -7621,7 +8080,7 @@ func (s *VirtualNodeData) SetVirtualNodeName(v string) *VirtualNodeData { return s } -// An object representing a virtual node returned by a list operation. 
+// An object that represents a virtual node returned by a list operation. type VirtualNodeRef struct { _ struct{} `type:"structure"` @@ -7663,7 +8122,7 @@ func (s *VirtualNodeRef) SetVirtualNodeName(v string) *VirtualNodeRef { return s } -// An object representing a virtual node service provider. +// An object that represents a virtual node service provider. type VirtualNodeServiceProvider struct { _ struct{} `type:"structure"` @@ -7703,7 +8162,7 @@ func (s *VirtualNodeServiceProvider) SetVirtualNodeName(v string) *VirtualNodeSe return s } -// An object representing the specification of a virtual node. +// An object that represents the specification of a virtual node. type VirtualNodeSpec struct { _ struct{} `type:"structure"` @@ -7711,10 +8170,11 @@ type VirtualNodeSpec struct { Listeners []*Listener `locationName:"listeners" type:"list"` - // An object representing the logging information for a virtual node. + // An object that represents the logging information for a virtual node. Logging *Logging `locationName:"logging" type:"structure"` - // An object representing the service discovery information for a virtual node. + // An object that represents the service discovery information for a virtual + // node. ServiceDiscovery *ServiceDiscovery `locationName:"serviceDiscovery" type:"structure"` } @@ -7792,7 +8252,7 @@ func (s *VirtualNodeSpec) SetServiceDiscovery(v *ServiceDiscovery) *VirtualNodeS return s } -// An object representing the current status of the virtual node. +// An object that represents the current status of the virtual node. type VirtualNodeStatus struct { _ struct{} `type:"structure"` @@ -7816,24 +8276,24 @@ func (s *VirtualNodeStatus) SetStatus(v string) *VirtualNodeStatus { return s } -// An object representing a virtual router returned by a describe operation. +// An object that represents a virtual router returned by a describe operation. 
type VirtualRouterData struct { _ struct{} `type:"structure"` // MeshName is a required field MeshName *string `locationName:"meshName" min:"1" type:"string" required:"true"` - // An object representing metadata for a resource. + // An object that represents metadata for a resource. // // Metadata is a required field Metadata *ResourceMetadata `locationName:"metadata" type:"structure" required:"true"` - // An object representing the specification of a virtual router. + // An object that represents the specification of a virtual router. // // Spec is a required field Spec *VirtualRouterSpec `locationName:"spec" type:"structure" required:"true"` - // An object representing the status of a virtual router. + // An object that represents the status of a virtual router. // // Status is a required field Status *VirtualRouterStatus `locationName:"status" type:"structure" required:"true"` @@ -7882,11 +8342,11 @@ func (s *VirtualRouterData) SetVirtualRouterName(v string) *VirtualRouterData { return s } -// An object representing a virtual router listener. +// An object that represents a virtual router listener. type VirtualRouterListener struct { _ struct{} `type:"structure"` - // An object representing a virtual node or virtual router listener port mapping. + // An object that represents a port mapping. // // PortMapping is a required field PortMapping *PortMapping `locationName:"portMapping" type:"structure" required:"true"` @@ -7926,7 +8386,7 @@ func (s *VirtualRouterListener) SetPortMapping(v *PortMapping) *VirtualRouterLis return s } -// An object representing a virtual router returned by a list operation. +// An object that represents a virtual router returned by a list operation. type VirtualRouterRef struct { _ struct{} `type:"structure"` @@ -7968,7 +8428,7 @@ func (s *VirtualRouterRef) SetVirtualRouterName(v string) *VirtualRouterRef { return s } -// An object representing a virtual node service provider. 
+// An object that represents a virtual node service provider. type VirtualRouterServiceProvider struct { _ struct{} `type:"structure"` @@ -8008,7 +8468,7 @@ func (s *VirtualRouterServiceProvider) SetVirtualRouterName(v string) *VirtualRo return s } -// An object representing the specification of a virtual router. +// An object that represents the specification of a virtual router. type VirtualRouterSpec struct { _ struct{} `type:"structure"` @@ -8054,7 +8514,7 @@ func (s *VirtualRouterSpec) SetListeners(v []*VirtualRouterListener) *VirtualRou return s } -// An object representing the status of a virtual router. +// An object that represents the status of a virtual router. type VirtualRouterStatus struct { _ struct{} `type:"structure"` @@ -8078,7 +8538,7 @@ func (s *VirtualRouterStatus) SetStatus(v string) *VirtualRouterStatus { return s } -// An object representing a virtual service backend for a virtual node. +// An object that represents a virtual service backend for a virtual node. type VirtualServiceBackend struct { _ struct{} `type:"structure"` @@ -8115,24 +8575,24 @@ func (s *VirtualServiceBackend) SetVirtualServiceName(v string) *VirtualServiceB return s } -// An object representing a virtual service returned by a describe operation. +// An object that represents a virtual service returned by a describe operation. type VirtualServiceData struct { _ struct{} `type:"structure"` // MeshName is a required field MeshName *string `locationName:"meshName" min:"1" type:"string" required:"true"` - // An object representing metadata for a resource. + // An object that represents metadata for a resource. // // Metadata is a required field Metadata *ResourceMetadata `locationName:"metadata" type:"structure" required:"true"` - // An object representing the specification of a virtual service. + // An object that represents the specification of a virtual service. 
// // Spec is a required field Spec *VirtualServiceSpec `locationName:"spec" type:"structure" required:"true"` - // An object representing the status of a virtual service. + // An object that represents the status of a virtual service. // // Status is a required field Status *VirtualServiceStatus `locationName:"status" type:"structure" required:"true"` @@ -8181,14 +8641,14 @@ func (s *VirtualServiceData) SetVirtualServiceName(v string) *VirtualServiceData return s } -// An object representing the provider for a virtual service. +// An object that represents the provider for a virtual service. type VirtualServiceProvider struct { _ struct{} `type:"structure"` - // An object representing a virtual node service provider. + // An object that represents a virtual node service provider. VirtualNode *VirtualNodeServiceProvider `locationName:"virtualNode" type:"structure"` - // An object representing a virtual node service provider. + // An object that represents a virtual node service provider. VirtualRouter *VirtualRouterServiceProvider `locationName:"virtualRouter" type:"structure"` } @@ -8234,7 +8694,7 @@ func (s *VirtualServiceProvider) SetVirtualRouter(v *VirtualRouterServiceProvide return s } -// An object representing a virtual service returned by a list operation. +// An object that represents a virtual service returned by a list operation. type VirtualServiceRef struct { _ struct{} `type:"structure"` @@ -8276,11 +8736,11 @@ func (s *VirtualServiceRef) SetVirtualServiceName(v string) *VirtualServiceRef { return s } -// An object representing the specification of a virtual service. +// An object that represents the specification of a virtual service. type VirtualServiceSpec struct { _ struct{} `type:"structure"` - // An object representing the provider for a virtual service. + // An object that represents the provider for a virtual service. 
Provider *VirtualServiceProvider `locationName:"provider" type:"structure"` } @@ -8315,7 +8775,7 @@ func (s *VirtualServiceSpec) SetProvider(v *VirtualServiceProvider) *VirtualServ return s } -// An object representing the status of a virtual service. +// An object that represents the status of a virtual service. type VirtualServiceStatus struct { _ struct{} `type:"structure"` @@ -8339,10 +8799,11 @@ func (s *VirtualServiceStatus) SetStatus(v string) *VirtualServiceStatus { return s } -// An object representing a target and its relative weight. Traffic is distributed +// An object that represents a target and its relative weight. Traffic is distributed // across targets according to their relative weight. For example, a weighted // target with a relative weight of 50 receives five times as much traffic as -// one with a relative weight of 10. +// one with a relative weight of 10. The total weight for all targets combined +// must be less than or equal to 100. type WeightedTarget struct { _ struct{} `type:"structure"` @@ -8410,6 +8871,23 @@ const ( EgressFilterTypeDropAll = "DROP_ALL" ) +const ( + // GrpcRetryPolicyEventCancelled is a GrpcRetryPolicyEvent enum value + GrpcRetryPolicyEventCancelled = "cancelled" + + // GrpcRetryPolicyEventDeadlineExceeded is a GrpcRetryPolicyEvent enum value + GrpcRetryPolicyEventDeadlineExceeded = "deadline-exceeded" + + // GrpcRetryPolicyEventInternal is a GrpcRetryPolicyEvent enum value + GrpcRetryPolicyEventInternal = "internal" + + // GrpcRetryPolicyEventResourceExhausted is a GrpcRetryPolicyEvent enum value + GrpcRetryPolicyEventResourceExhausted = "resource-exhausted" + + // GrpcRetryPolicyEventUnavailable is a GrpcRetryPolicyEvent enum value + GrpcRetryPolicyEventUnavailable = "unavailable" +) + const ( // HttpMethodConnect is a HttpMethod enum value HttpMethodConnect = "CONNECT" @@ -8459,9 +8937,15 @@ const ( ) const ( + // PortProtocolGrpc is a PortProtocol enum value + PortProtocolGrpc = "grpc" + // PortProtocolHttp is a 
PortProtocol enum value PortProtocolHttp = "http" + // PortProtocolHttp2 is a PortProtocol enum value + PortProtocolHttp2 = "http2" + // PortProtocolTcp is a PortProtocol enum value PortProtocolTcp = "tcp" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/appmesh/service.go b/vendor/github.com/aws/aws-sdk-go/service/appmesh/service.go index 88ea209b40e..962ada5cea9 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/appmesh/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/appmesh/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *AppMesh { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "appmesh" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *AppMesh { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *AppMesh { svc := &AppMesh{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2019-01-25", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/appstream/api.go b/vendor/github.com/aws/aws-sdk-go/service/appstream/api.go index 8a325ba01ba..a04d00dd34e 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/appstream/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/appstream/api.go @@ -5014,7 +5014,13 @@ type CreateFleetInput struct { // The Amazon Resource Name (ARN) of the IAM role to apply to the fleet. 
To // assume a role, a fleet instance calls the AWS Security Token Service (STS) // AssumeRole API operation and passes the ARN of the role to use. The operation - // creates a new session with temporary credentials. + // creates a new session with temporary credentials. AppStream 2.0 retrieves + // the temporary credentials and creates the AppStream_Machine_Role credential + // profile on the instance. + // + // For more information, see Using an IAM Role to Grant Permissions to Applications + // and Scripts Running on AppStream 2.0 Streaming Instances (https://docs.aws.amazon.com/appstream2/latest/developerguide/using-iam-roles-to-grant-permissions-to-applications-scripts-streaming-instances.html) + // in the Amazon AppStream 2.0 Administration Guide. IamRoleArn *string `type:"string"` // The amount of time that users can be idle (inactive) before they are disconnected @@ -5315,7 +5321,13 @@ type CreateImageBuilderInput struct { // The Amazon Resource Name (ARN) of the IAM role to apply to the image builder. // To assume a role, the image builder calls the AWS Security Token Service // (STS) AssumeRole API operation and passes the ARN of the role to use. The - // operation creates a new session with temporary credentials. + // operation creates a new session with temporary credentials. AppStream 2.0 + // retrieves the temporary credentials and creates the AppStream_Machine_Role + // credential profile on the instance. + // + // For more information, see Using an IAM Role to Grant Permissions to Applications + // and Scripts Running on AppStream 2.0 Streaming Instances (https://docs.aws.amazon.com/appstream2/latest/developerguide/using-iam-roles-to-grant-permissions-to-applications-scripts-streaming-instances.html) + // in the Amazon AppStream 2.0 Administration Guide. IamRoleArn *string `type:"string"` // The ARN of the public, private, or shared image to use. 
@@ -5324,7 +5336,48 @@ type CreateImageBuilderInput struct { // The name of the image used to create the image builder. ImageName *string `min:"1" type:"string"` - // The instance type to use when launching the image builder. + // The instance type to use when launching the image builder. The following + // instance types are available: + // + // * stream.standard.medium + // + // * stream.standard.large + // + // * stream.compute.large + // + // * stream.compute.xlarge + // + // * stream.compute.2xlarge + // + // * stream.compute.4xlarge + // + // * stream.compute.8xlarge + // + // * stream.memory.large + // + // * stream.memory.xlarge + // + // * stream.memory.2xlarge + // + // * stream.memory.4xlarge + // + // * stream.memory.8xlarge + // + // * stream.graphics-design.large + // + // * stream.graphics-design.xlarge + // + // * stream.graphics-design.2xlarge + // + // * stream.graphics-design.4xlarge + // + // * stream.graphics-desktop.2xlarge + // + // * stream.graphics-pro.4xlarge + // + // * stream.graphics-pro.8xlarge + // + // * stream.graphics-pro.16xlarge // // InstanceType is a required field InstanceType *string `min:"1" type:"string" required:"true"` @@ -5606,6 +5659,11 @@ type CreateStackInput struct { // The stack name to display. DisplayName *string `type:"string"` + // The domains where AppStream 2.0 streaming sessions can be embedded in an + // iframe. You must approve the domains that you want to host embedded AppStream + // 2.0 streaming sessions. + EmbedHostDomains []*string `min:"1" type:"list"` + // The URL that users are redirected to after they click the Send Feedback link. // If no URL is specified, no Send Feedback link is displayed. 
FeedbackURL *string `type:"string"` @@ -5657,6 +5715,9 @@ func (s *CreateStackInput) Validate() error { if s.AccessEndpoints != nil && len(s.AccessEndpoints) < 1 { invalidParams.Add(request.NewErrParamMinLen("AccessEndpoints", 1)) } + if s.EmbedHostDomains != nil && len(s.EmbedHostDomains) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EmbedHostDomains", 1)) + } if s.Name == nil { invalidParams.Add(request.NewErrParamRequired("Name")) } @@ -5732,6 +5793,12 @@ func (s *CreateStackInput) SetDisplayName(v string) *CreateStackInput { return s } +// SetEmbedHostDomains sets the EmbedHostDomains field's value. +func (s *CreateStackInput) SetEmbedHostDomains(v []*string) *CreateStackInput { + s.EmbedHostDomains = v + return s +} + // SetFeedbackURL sets the FeedbackURL field's value. func (s *CreateStackInput) SetFeedbackURL(v string) *CreateStackInput { s.FeedbackURL = &v @@ -7947,7 +8014,13 @@ type Fleet struct { // The ARN of the IAM role that is applied to the fleet. To assume a role, the // fleet instance calls the AWS Security Token Service (STS) AssumeRole API // operation and passes the ARN of the role to use. The operation creates a - // new session with temporary credentials. + // new session with temporary credentials. AppStream 2.0 retrieves the temporary + // credentials and creates the AppStream_Machine_Role credential profile on + // the instance. + // + // For more information, see Using an IAM Role to Grant Permissions to Applications + // and Scripts Running on AppStream 2.0 Streaming Instances (https://docs.aws.amazon.com/appstream2/latest/developerguide/using-iam-roles-to-grant-permissions-to-applications-scripts-streaming-instances.html) + // in the Amazon AppStream 2.0 Administration Guide. IamRoleArn *string `type:"string"` // The amount of time that users can be idle (inactive) before they are disconnected @@ -7980,7 +8053,48 @@ type Fleet struct { // The name of the image used to create the fleet. 
ImageName *string `min:"1" type:"string"` - // The instance type to use when launching fleet instances. + // The instance type to use when launching fleet instances. The following instance + // types are available: + // + // * stream.standard.medium + // + // * stream.standard.large + // + // * stream.compute.large + // + // * stream.compute.xlarge + // + // * stream.compute.2xlarge + // + // * stream.compute.4xlarge + // + // * stream.compute.8xlarge + // + // * stream.memory.large + // + // * stream.memory.xlarge + // + // * stream.memory.2xlarge + // + // * stream.memory.4xlarge + // + // * stream.memory.8xlarge + // + // * stream.graphics-design.large + // + // * stream.graphics-design.xlarge + // + // * stream.graphics-design.2xlarge + // + // * stream.graphics-design.4xlarge + // + // * stream.graphics-desktop.2xlarge + // + // * stream.graphics-pro.4xlarge + // + // * stream.graphics-pro.8xlarge + // + // * stream.graphics-pro.16xlarge // // InstanceType is a required field InstanceType *string `min:"1" type:"string" required:"true"` @@ -8365,7 +8479,13 @@ type ImageBuilder struct { // The ARN of the IAM role that is applied to the image builder. To assume a // role, the image builder calls the AWS Security Token Service (STS) AssumeRole // API operation and passes the ARN of the role to use. The operation creates - // a new session with temporary credentials. + // a new session with temporary credentials. AppStream 2.0 retrieves the temporary + // credentials and creates the AppStream_Machine_Role credential profile on + // the instance. + // + // For more information, see Using an IAM Role to Grant Permissions to Applications + // and Scripts Running on AppStream 2.0 Streaming Instances (https://docs.aws.amazon.com/appstream2/latest/developerguide/using-iam-roles-to-grant-permissions-to-applications-scripts-streaming-instances.html) + // in the Amazon AppStream 2.0 Administration Guide. 
IamRoleArn *string `type:"string"` // The ARN of the image from which this builder was created. @@ -8374,7 +8494,48 @@ type ImageBuilder struct { // The image builder errors. ImageBuilderErrors []*ResourceError `type:"list"` - // The instance type for the image builder. + // The instance type for the image builder. The following instance types are + // available: + // + // * stream.standard.medium + // + // * stream.standard.large + // + // * stream.compute.large + // + // * stream.compute.xlarge + // + // * stream.compute.2xlarge + // + // * stream.compute.4xlarge + // + // * stream.compute.8xlarge + // + // * stream.memory.large + // + // * stream.memory.xlarge + // + // * stream.memory.2xlarge + // + // * stream.memory.4xlarge + // + // * stream.memory.8xlarge + // + // * stream.graphics-design.large + // + // * stream.graphics-design.xlarge + // + // * stream.graphics-design.2xlarge + // + // * stream.graphics-design.4xlarge + // + // * stream.graphics-desktop.2xlarge + // + // * stream.graphics-pro.4xlarge + // + // * stream.graphics-pro.8xlarge + // + // * stream.graphics-pro.16xlarge InstanceType *string `min:"1" type:"string"` // The name of the image builder. @@ -9208,6 +9369,11 @@ type Stack struct { // The stack name to display. DisplayName *string `min:"1" type:"string"` + // The domains where AppStream 2.0 streaming sessions can be embedded in an + // iframe. You must approve the domains that you want to host embedded AppStream + // 2.0 streaming sessions. + EmbedHostDomains []*string `min:"1" type:"list"` + // The URL that users are redirected to after they click the Send Feedback link. // If no URL is specified, no Send Feedback link is displayed. FeedbackURL *string `type:"string"` @@ -9277,6 +9443,12 @@ func (s *Stack) SetDisplayName(v string) *Stack { return s } +// SetEmbedHostDomains sets the EmbedHostDomains field's value. 
+func (s *Stack) SetEmbedHostDomains(v []*string) *Stack { + s.EmbedHostDomains = v + return s +} + // SetFeedbackURL sets the FeedbackURL field's value. func (s *Stack) SetFeedbackURL(v string) *Stack { s.FeedbackURL = &v @@ -9927,7 +10099,13 @@ type UpdateFleetInput struct { // The Amazon Resource Name (ARN) of the IAM role to apply to the fleet. To // assume a role, a fleet instance calls the AWS Security Token Service (STS) // AssumeRole API operation and passes the ARN of the role to use. The operation - // creates a new session with temporary credentials. + // creates a new session with temporary credentials. AppStream 2.0 retrieves + // the temporary credentials and creates the AppStream_Machine_Role credential + // profile on the instance. + // + // For more information, see Using an IAM Role to Grant Permissions to Applications + // and Scripts Running on AppStream 2.0 Streaming Instances (https://docs.aws.amazon.com/appstream2/latest/developerguide/using-iam-roles-to-grant-permissions-to-applications-scripts-streaming-instances.html) + // in the Amazon AppStream 2.0 Administration Guide. IamRoleArn *string `type:"string"` // The amount of time that users can be idle (inactive) before they are disconnected @@ -10280,6 +10458,11 @@ type UpdateStackInput struct { // The stack name to display. DisplayName *string `type:"string"` + // The domains where AppStream 2.0 streaming sessions can be embedded in an + // iframe. You must approve the domains that you want to host embedded AppStream + // 2.0 streaming sessions. + EmbedHostDomains []*string `min:"1" type:"list"` + // The URL that users are redirected to after they choose the Send Feedback // link. If no URL is specified, no Send Feedback link is displayed. 
FeedbackURL *string `type:"string"` @@ -10316,6 +10499,9 @@ func (s *UpdateStackInput) Validate() error { if s.AccessEndpoints != nil && len(s.AccessEndpoints) < 1 { invalidParams.Add(request.NewErrParamMinLen("AccessEndpoints", 1)) } + if s.EmbedHostDomains != nil && len(s.EmbedHostDomains) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EmbedHostDomains", 1)) + } if s.Name == nil { invalidParams.Add(request.NewErrParamRequired("Name")) } @@ -10403,6 +10589,12 @@ func (s *UpdateStackInput) SetDisplayName(v string) *UpdateStackInput { return s } +// SetEmbedHostDomains sets the EmbedHostDomains field's value. +func (s *UpdateStackInput) SetEmbedHostDomains(v []*string) *UpdateStackInput { + s.EmbedHostDomains = v + return s +} + // SetFeedbackURL sets the FeedbackURL field's value. func (s *UpdateStackInput) SetFeedbackURL(v string) *UpdateStackInput { s.FeedbackURL = &v @@ -11128,6 +11320,9 @@ const ( // StackAttributeUserSettings is a StackAttribute enum value StackAttributeUserSettings = "USER_SETTINGS" + // StackAttributeEmbedHostDomains is a StackAttribute enum value + StackAttributeEmbedHostDomains = "EMBED_HOST_DOMAINS" + // StackAttributeIamRoleArn is a StackAttribute enum value StackAttributeIamRoleArn = "IAM_ROLE_ARN" diff --git a/vendor/github.com/aws/aws-sdk-go/service/appstream/service.go b/vendor/github.com/aws/aws-sdk-go/service/appstream/service.go index 168cf44e0f7..51a7d38f3e6 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/appstream/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/appstream/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *AppStream { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "appstream" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service 
client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *AppStream { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *AppStream { svc := &AppStream{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2016-12-01", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/appsync/service.go b/vendor/github.com/aws/aws-sdk-go/service/appsync/service.go index 6dae3d4cca7..e2003998c16 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/appsync/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/appsync/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *AppSync { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "appsync" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *AppSync { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *AppSync { svc := &AppSync{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-07-25", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/athena/service.go b/vendor/github.com/aws/aws-sdk-go/service/athena/service.go index 6806a62ec95..1f2eaa078b2 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/athena/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/athena/service.go @@ -46,11 +46,11 @@ const ( // svc := athena.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Athena { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Athena { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Athena { svc := &Athena{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-05-18", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/autoscaling/service.go b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/service.go index e1da9fd7546..4ce49ffd27d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/autoscaling/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/service.go @@ -46,11 +46,11 @@ const ( // svc := autoscaling.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *AutoScaling { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *AutoScaling { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *AutoScaling { svc := &AutoScaling{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2011-01-01", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/autoscalingplans/service.go b/vendor/github.com/aws/aws-sdk-go/service/autoscalingplans/service.go index dd6a1ece467..2c35b02391d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/autoscalingplans/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/autoscalingplans/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *AutoScalingPlans { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "autoscaling-plans" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *AutoScalingPlans { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *AutoScalingPlans { svc := &AutoScalingPlans{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-01-06", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/backup/service.go b/vendor/github.com/aws/aws-sdk-go/service/backup/service.go index 2f60e1c1f54..873f24a9f9d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/backup/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/backup/service.go @@ -46,11 +46,11 @@ const ( // svc := backup.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Backup { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Backup { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Backup { svc := &Backup{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-11-15", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/batch/api.go b/vendor/github.com/aws/aws-sdk-go/service/batch/api.go index 9cb0000b92f..d061b1533d1 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/batch/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/batch/api.go @@ -2141,19 +2141,35 @@ func (s *ComputeEnvironmentOrder) SetOrder(v int64) *ComputeEnvironmentOrder { type ComputeResource struct { _ struct{} `type:"structure"` + // The allocation strategy to use for the compute resource in case not enough + // instances of the best fitting instance type can be allocated. This could + // be due to availability of the instance type in the region or Amazon EC2 service + // limits (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-resource-limits.html). + // If this is not specified, the default is BEST_FIT, which will use only the + // best fitting instance type, waiting for additional capacity if it's not available. + // This allocation strategy keeps costs lower but can limit scaling. BEST_FIT_PROGRESSIVE + // will select an additional instance type that is large enough to meet the + // requirements of the jobs in the queue, with a preference for an instance + // type with a lower cost. 
SPOT_CAPACITY_OPTIMIZED is only available for Spot + // Instance compute resources and will select an additional instance type that + // is large enough to meet the requirements of the jobs in the queue, with a + // preference for an instance type that is less likely to be interrupted. + AllocationStrategy *string `locationName:"allocationStrategy" type:"string" enum:"CRAllocationStrategy"` + // The maximum percentage that a Spot Instance price can be when compared with // the On-Demand price for that instance type before instances are launched. // For example, if your maximum percentage is 20%, then the Spot price must - // be below 20% of the current On-Demand price for that EC2 instance. You always - // pay the lowest (market) price and never more than your maximum percentage. - // If you leave this field empty, the default value is 100% of the On-Demand - // price. + // be below 20% of the current On-Demand price for that Amazon EC2 instance. + // You always pay the lowest (market) price and never more than your maximum + // percentage. If you leave this field empty, the default value is 100% of the + // On-Demand price. BidPercentage *int64 `locationName:"bidPercentage" type:"integer"` - // The desired number of EC2 vCPUS in the compute environment. + // The desired number of Amazon EC2 vCPUS in the compute environment. DesiredvCpus *int64 `locationName:"desiredvCpus" type:"integer"` - // The EC2 key pair that is used for instances launched in the compute environment. + // The Amazon EC2 key pair that is used for instances launched in the compute + // environment. Ec2KeyPair *string `locationName:"ec2KeyPair" type:"string"` // The Amazon Machine Image (AMI) ID used for instances launched in the compute @@ -2170,8 +2186,8 @@ type ComputeResource struct { InstanceRole *string `locationName:"instanceRole" type:"string" required:"true"` // The instances types that may be launched. 
You can specify instance families - // to launch any instance type within those families (for example, c4 or p3), - // or you can specify specific sizes within a family (such as c4.8xlarge). You + // to launch any instance type within those families (for example, c5 or p3), + // or you can specify specific sizes within a family (such as c5.8xlarge). You // can also choose optimal to pick instance types (from the C, M, and R instance // families) on the fly that match the demand of your job queues. // @@ -2186,13 +2202,13 @@ type ComputeResource struct { // in the AWS Batch User Guide. LaunchTemplate *LaunchTemplateSpecification `locationName:"launchTemplate" type:"structure"` - // The maximum number of EC2 vCPUs that an environment can reach. + // The maximum number of Amazon EC2 vCPUs that an environment can reach. // // MaxvCpus is a required field MaxvCpus *int64 `locationName:"maxvCpus" type:"integer" required:"true"` - // The minimum number of EC2 vCPUs that an environment should maintain (even - // if the compute environment is DISABLED). + // The minimum number of Amazon EC2 vCPUs that an environment should maintain + // (even if the compute environment is DISABLED). // // MinvCpus is a required field MinvCpus *int64 `locationName:"minvCpus" type:"integer" required:"true"` @@ -2206,8 +2222,11 @@ type ComputeResource struct { // in the Amazon EC2 User Guide for Linux Instances. PlacementGroup *string `locationName:"placementGroup" type:"string"` - // The EC2 security group that is associated with instances launched in the - // compute environment. + // The Amazon EC2 security groups associated with instances launched in the + // compute environment. One or more security groups must be specified, either + // in securityGroupIds or using a launch template referenced in launchTemplate. + // If security groups are specified using both securityGroupIds and launchTemplate, + // the values in securityGroupIds will be used. 
SecurityGroupIds []*string `locationName:"securityGroupIds" type:"list"` // The Amazon Resource Name (ARN) of the Amazon EC2 Spot Fleet IAM role applied @@ -2273,6 +2292,12 @@ func (s *ComputeResource) Validate() error { return nil } +// SetAllocationStrategy sets the AllocationStrategy field's value. +func (s *ComputeResource) SetAllocationStrategy(v string) *ComputeResource { + s.AllocationStrategy = &v + return s +} + // SetBidPercentage sets the BidPercentage field's value. func (s *ComputeResource) SetBidPercentage(v int64) *ComputeResource { s.BidPercentage = &v @@ -2368,13 +2393,13 @@ func (s *ComputeResource) SetType(v string) *ComputeResource { type ComputeResourceUpdate struct { _ struct{} `type:"structure"` - // The desired number of EC2 vCPUS in the compute environment. + // The desired number of Amazon EC2 vCPUS in the compute environment. DesiredvCpus *int64 `locationName:"desiredvCpus" type:"integer"` - // The maximum number of EC2 vCPUs that an environment can reach. + // The maximum number of Amazon EC2 vCPUs that an environment can reach. MaxvCpus *int64 `locationName:"maxvCpus" type:"integer"` - // The minimum number of EC2 vCPUs that an environment should maintain. + // The minimum number of Amazon EC2 vCPUs that an environment should maintain. 
MinvCpus *int64 `locationName:"minvCpus" type:"integer"` } @@ -6063,6 +6088,17 @@ const ( CETypeUnmanaged = "UNMANAGED" ) +const ( + // CRAllocationStrategyBestFit is a CRAllocationStrategy enum value + CRAllocationStrategyBestFit = "BEST_FIT" + + // CRAllocationStrategyBestFitProgressive is a CRAllocationStrategy enum value + CRAllocationStrategyBestFitProgressive = "BEST_FIT_PROGRESSIVE" + + // CRAllocationStrategySpotCapacityOptimized is a CRAllocationStrategy enum value + CRAllocationStrategySpotCapacityOptimized = "SPOT_CAPACITY_OPTIMIZED" +) + const ( // CRTypeEc2 is a CRType enum value CRTypeEc2 = "EC2" diff --git a/vendor/github.com/aws/aws-sdk-go/service/batch/service.go b/vendor/github.com/aws/aws-sdk-go/service/batch/service.go index 6b10821b33f..e4f63ee8dc3 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/batch/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/batch/service.go @@ -46,11 +46,11 @@ const ( // svc := batch.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Batch { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Batch { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Batch { svc := &Batch{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2016-08-10", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/budgets/service.go b/vendor/github.com/aws/aws-sdk-go/service/budgets/service.go index dc10c2aaa1b..3d72b59e981 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/budgets/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/budgets/service.go @@ -46,11 +46,11 @@ const ( // svc := budgets.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Budgets { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Budgets { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Budgets { svc := &Budgets{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2016-10-20", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloud9/api.go b/vendor/github.com/aws/aws-sdk-go/service/cloud9/api.go index 0710f084449..e6eda869ff2 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloud9/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloud9/api.go @@ -1810,11 +1810,15 @@ type EnvironmentLifecycle struct { // The current creation or deletion lifecycle state of the environment. // + // * CREATING: The environment is in the process of being created. + // // * CREATED: The environment was successfully created. // - // * DELETE_FAILED: The environment failed to delete. + // * CREATE_FAILED: The environment failed to be created. // // * DELETING: The environment is in the process of being deleted. + // + // * DELETE_FAILED: The environment failed to delete. 
Status *string `locationName:"status" type:"string" enum:"EnvironmentLifecycleStatus"` } @@ -2157,9 +2161,15 @@ func (s UpdateEnvironmentOutput) GoString() string { } const ( + // EnvironmentLifecycleStatusCreating is a EnvironmentLifecycleStatus enum value + EnvironmentLifecycleStatusCreating = "CREATING" + // EnvironmentLifecycleStatusCreated is a EnvironmentLifecycleStatus enum value EnvironmentLifecycleStatusCreated = "CREATED" + // EnvironmentLifecycleStatusCreateFailed is a EnvironmentLifecycleStatus enum value + EnvironmentLifecycleStatusCreateFailed = "CREATE_FAILED" + // EnvironmentLifecycleStatusDeleting is a EnvironmentLifecycleStatus enum value EnvironmentLifecycleStatusDeleting = "DELETING" diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloud9/service.go b/vendor/github.com/aws/aws-sdk-go/service/cloud9/service.go index 8b9d30d5440..9f6d2871cfc 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloud9/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloud9/service.go @@ -46,11 +46,11 @@ const ( // svc := cloud9.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Cloud9 { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Cloud9 { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Cloud9 { svc := &Cloud9{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-09-23", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudformation/service.go b/vendor/github.com/aws/aws-sdk-go/service/cloudformation/service.go index 65df49a0cc8..ab37537cc1e 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudformation/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudformation/service.go @@ -46,11 +46,11 @@ const ( // svc := cloudformation.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudFormation { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *CloudFormation { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *CloudFormation { svc := &CloudFormation{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2010-05-15", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudfront/service.go b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/service.go index d692c8f030f..bfb1606aa4a 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudfront/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/service.go @@ -46,11 +46,11 @@ const ( // svc := cloudfront.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudFront { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *CloudFront { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *CloudFront { svc := &CloudFront{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2019-03-26", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudhsmv2/service.go b/vendor/github.com/aws/aws-sdk-go/service/cloudhsmv2/service.go index c86db6ae7b0..38657874267 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudhsmv2/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudhsmv2/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudHSMV2 { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "cloudhsm" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *CloudHSMV2 { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *CloudHSMV2 { svc := &CloudHSMV2{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-04-28", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudsearch/service.go b/vendor/github.com/aws/aws-sdk-go/service/cloudsearch/service.go index 850bc137051..76c21764817 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudsearch/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudsearch/service.go @@ -46,11 +46,11 @@ const ( // svc := cloudsearch.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudSearch { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *CloudSearch { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *CloudSearch { svc := &CloudSearch{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2013-01-01", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/service.go b/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/service.go index 5f1d4ddc41f..ee863344c95 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/service.go @@ -46,11 +46,11 @@ const ( // svc := cloudtrail.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudTrail { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *CloudTrail { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *CloudTrail { svc := &CloudTrail{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2013-11-01", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/api.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/api.go index 0cc1304030f..4fcd5618a7b 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/api.go @@ -2304,19 +2304,20 @@ func (c *CloudWatch) TagResourceRequest(input *TagResourceInput) (req *request.R // TagResource API operation for Amazon CloudWatch. // // Assigns one or more tags (key-value pairs) to the specified CloudWatch resource. +// Currently, the only CloudWatch resources that can be tagged are alarms. +// // Tags can help you organize and categorize your resources. You can also use // them to scope user permissions, by granting a user permission to access or -// change only resources with certain tag values. In CloudWatch, alarms can -// be tagged. +// change only resources with certain tag values. // // Tags don't have any semantic meaning to AWS and are interpreted strictly // as strings of characters. // -// You can use the TagResource action with a resource that already has tags. -// If you specify a new tag key for the resource, this tag is appended to the -// list of tags associated with the resource. If you specify a tag key that -// is already associated with the resource, the new tag value that you specify -// replaces the previous value for that tag. 
+// You can use the TagResource action with an alarm that already has tags. If +// you specify a new tag key for the alarm, this tag is appended to the list +// of tags associated with the alarm. If you specify a tag key that is already +// associated with the alarm, the new tag value that you specify replaces the +// previous value for that tag. // // You can associate as many as 50 tags with a resource. // @@ -5068,6 +5069,19 @@ type MetricDataQuery struct { // MetricStat but not both. MetricStat *MetricStat `type:"structure"` + // The granularity, in seconds, of the returned data points. For metrics with + // regular resolution, a period can be as short as one minute (60 seconds) and + // must be a multiple of 60. For high-resolution metrics that are collected + // at intervals of less than one minute, the period can be 1, 5, 10, 30, 60, + // or any multiple of 60. High-resolution metrics are those metrics stored by + // a PutMetricData operation that includes a StorageResolution of 1 second. + // + // Use this field only when you are performing a GetMetricData operation, and + // only when you are specifying the Expression field. Do not use this field + // with a PutMetricAlarm operation or when you are specifying a MetricStat in + // a GetMetricData operation. + Period *int64 `min:"1" type:"integer"` + // When used in GetMetricData, this option indicates whether to return the timestamps // and raw data values of this metric. 
If you are performing this call just // to do math expressions and do not also need the raw data returned, you can @@ -5101,6 +5115,9 @@ func (s *MetricDataQuery) Validate() error { if s.Id != nil && len(*s.Id) < 1 { invalidParams.Add(request.NewErrParamMinLen("Id", 1)) } + if s.Period != nil && *s.Period < 1 { + invalidParams.Add(request.NewErrParamMinValue("Period", 1)) + } if s.MetricStat != nil { if err := s.MetricStat.Validate(); err != nil { invalidParams.AddNested("MetricStat", err.(request.ErrInvalidParams)) @@ -5137,6 +5154,12 @@ func (s *MetricDataQuery) SetMetricStat(v *MetricStat) *MetricDataQuery { return s } +// SetPeriod sets the Period field's value. +func (s *MetricDataQuery) SetPeriod(v int64) *MetricDataQuery { + s.Period = &v + return s +} + // SetReturnData sets the ReturnData field's value. func (s *MetricDataQuery) SetReturnData(v bool) *MetricDataQuery { s.ReturnData = &v @@ -5400,7 +5423,25 @@ type MetricStat struct { // Metric is a required field Metric *Metric `type:"structure" required:"true"` - // The period, in seconds, to use when retrieving the metric. + // The granularity, in seconds, of the returned data points. For metrics with + // regular resolution, a period can be as short as one minute (60 seconds) and + // must be a multiple of 60. For high-resolution metrics that are collected + // at intervals of less than one minute, the period can be 1, 5, 10, 30, 60, + // or any multiple of 60. High-resolution metrics are those metrics stored by + // a PutMetricData call that includes a StorageResolution of 1 second. + // + // If the StartTime parameter specifies a time stamp that is greater than 3 + // hours ago, you must specify the period as follows or no data points in that + // time range is returned: + // + // * Start time between 3 hours and 15 days ago - Use a multiple of 60 seconds + // (1 minute). + // + // * Start time between 15 and 63 days ago - Use a multiple of 300 seconds + // (5 minutes). 
+ // + // * Start time greater than 63 days ago - Use a multiple of 3600 seconds + // (1 hour). // // Period is a required field Period *int64 `min:"1" type:"integer" required:"true"` @@ -6509,14 +6550,13 @@ func (s *Tag) SetValue(v string) *Tag { type TagResourceInput struct { _ struct{} `type:"structure"` - // The ARN of the CloudWatch resource that you're adding tags to. For more information - // on ARN format, see Example ARNs (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-cloudwatch) - // in the Amazon Web Services General Reference. + // The ARN of the CloudWatch alarm that you're adding tags to. The ARN format + // is arn:aws:cloudwatch:Region:account-id:alarm:alarm-name // // ResourceARN is a required field ResourceARN *string `min:"1" type:"string" required:"true"` - // The list of key-value pairs to associate with the resource. + // The list of key-value pairs to associate with the alarm. // // Tags is a required field Tags []*Tag `type:"list" required:"true"` diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/service.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/service.go index 0d478662240..9b43ce1f0b3 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/service.go @@ -46,11 +46,11 @@ const ( // svc := cloudwatch.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudWatch { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *CloudWatch { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *CloudWatch { svc := &CloudWatch{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2010-08-01", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/service.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/service.go index 2a7f6969cd8..d20dca58bfa 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/service.go @@ -46,11 +46,11 @@ const ( // svc := cloudwatchevents.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudWatchEvents { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *CloudWatchEvents { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *CloudWatchEvents { svc := &CloudWatchEvents{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-10-07", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go index 8d5f929df84..59b3512359d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go @@ -46,11 +46,11 @@ const ( // svc := cloudwatchlogs.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudWatchLogs { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *CloudWatchLogs { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *CloudWatchLogs { svc := &CloudWatchLogs{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2014-03-28", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/codebuild/service.go b/vendor/github.com/aws/aws-sdk-go/service/codebuild/service.go index b9ff2b6f76e..d04b3dab2b4 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/codebuild/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/codebuild/service.go @@ -46,11 +46,11 @@ const ( // svc := codebuild.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *CodeBuild { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *CodeBuild { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *CodeBuild { svc := &CodeBuild{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2016-10-06", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/codecommit/service.go b/vendor/github.com/aws/aws-sdk-go/service/codecommit/service.go index c8cad394ab0..63bc31e314c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/codecommit/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/codecommit/service.go @@ -46,11 +46,11 @@ const ( // svc := codecommit.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *CodeCommit { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *CodeCommit { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *CodeCommit { svc := &CodeCommit{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-04-13", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/codedeploy/service.go b/vendor/github.com/aws/aws-sdk-go/service/codedeploy/service.go index dd1eaf41355..dbb6a1bd26c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/codedeploy/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/codedeploy/service.go @@ -46,11 +46,11 @@ const ( // svc := codedeploy.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *CodeDeploy { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *CodeDeploy { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *CodeDeploy { svc := &CodeDeploy{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2014-10-06", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/codepipeline/service.go b/vendor/github.com/aws/aws-sdk-go/service/codepipeline/service.go index 397a00f9eb2..0bc30449588 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/codepipeline/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/codepipeline/service.go @@ -46,11 +46,11 @@ const ( // svc := codepipeline.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *CodePipeline { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *CodePipeline { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *CodePipeline { svc := &CodePipeline{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-07-09", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/service.go b/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/service.go index 9ebae103f36..e5c2011c0a9 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cognitoidentity/service.go @@ -46,11 +46,11 @@ const ( // svc := cognitoidentity.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *CognitoIdentity { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *CognitoIdentity { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *CognitoIdentity { svc := &CognitoIdentity{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2014-06-30", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/cognitoidentityprovider/service.go b/vendor/github.com/aws/aws-sdk-go/service/cognitoidentityprovider/service.go index 68efbd80b6e..2c512449a10 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cognitoidentityprovider/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cognitoidentityprovider/service.go @@ -46,11 +46,11 @@ const ( // svc := cognitoidentityprovider.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *CognitoIdentityProvider { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *CognitoIdentityProvider { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *CognitoIdentityProvider { svc := &CognitoIdentityProvider{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2016-04-18", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/configservice/service.go b/vendor/github.com/aws/aws-sdk-go/service/configservice/service.go index 2fdea95561f..3cd3cff327d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/configservice/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/configservice/service.go @@ -46,11 +46,11 @@ const ( // svc := configservice.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *ConfigService { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ConfigService { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ConfigService { svc := &ConfigService{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2014-11-12", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/costandusagereportservice/service.go b/vendor/github.com/aws/aws-sdk-go/service/costandusagereportservice/service.go index 39e3cedfeae..dca54fd5c95 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/costandusagereportservice/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/costandusagereportservice/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *CostandUsageReportServic if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "cur" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *CostandUsageReportService { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *CostandUsageReportService { svc := &CostandUsageReportService{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-01-06", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/databasemigrationservice/service.go b/vendor/github.com/aws/aws-sdk-go/service/databasemigrationservice/service.go index 8ee775e03ca..ff3e35d659c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/databasemigrationservice/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/databasemigrationservice/service.go @@ -46,11 +46,11 @@ const ( // svc := databasemigrationservice.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *DatabaseMigrationService { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *DatabaseMigrationService { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *DatabaseMigrationService { svc := &DatabaseMigrationService{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2016-01-01", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/datapipeline/service.go b/vendor/github.com/aws/aws-sdk-go/service/datapipeline/service.go index ebd5c29b2fc..8f48358db5b 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/datapipeline/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/datapipeline/service.go @@ -46,11 +46,11 @@ const ( // svc := datapipeline.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *DataPipeline { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *DataPipeline { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *DataPipeline { svc := &DataPipeline{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2012-10-29", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/datasync/service.go b/vendor/github.com/aws/aws-sdk-go/service/datasync/service.go index 22d8ddcb90c..9f883b9b883 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/datasync/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/datasync/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *DataSync { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "datasync" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *DataSync { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *DataSync { svc := &DataSync{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-11-09", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/dax/service.go b/vendor/github.com/aws/aws-sdk-go/service/dax/service.go index 545ea0312c3..9a7d90302f3 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/dax/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/dax/service.go @@ -46,11 +46,11 @@ const ( // svc := dax.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *DAX { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *DAX { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *DAX { svc := &DAX{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-04-19", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/devicefarm/service.go b/vendor/github.com/aws/aws-sdk-go/service/devicefarm/service.go index 0f2354a4e1c..4e10bf876dc 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/devicefarm/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/devicefarm/service.go @@ -46,11 +46,11 @@ const ( // svc := devicefarm.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *DeviceFarm { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *DeviceFarm { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *DeviceFarm { svc := &DeviceFarm{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-06-23", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/directconnect/service.go b/vendor/github.com/aws/aws-sdk-go/service/directconnect/service.go index bb182821c59..be5ad460a9f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/directconnect/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/directconnect/service.go @@ -46,11 +46,11 @@ const ( // svc := directconnect.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *DirectConnect { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *DirectConnect { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *DirectConnect { svc := &DirectConnect{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2012-10-25", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/directoryservice/service.go b/vendor/github.com/aws/aws-sdk-go/service/directoryservice/service.go index 0743c9632ca..5c4c03db34f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/directoryservice/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/directoryservice/service.go @@ -46,11 +46,11 @@ const ( // svc := directoryservice.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *DirectoryService { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *DirectoryService { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *DirectoryService { svc := &DirectoryService{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-04-16", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/dlm/service.go b/vendor/github.com/aws/aws-sdk-go/service/dlm/service.go index b062fe30d63..ada01c825d7 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/dlm/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/dlm/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *DLM { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "dlm" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *DLM { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *DLM { svc := &DLM{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-01-12", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/docdb/service.go b/vendor/github.com/aws/aws-sdk-go/service/docdb/service.go index cd0f3d91dc5..c891f31c420 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/docdb/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/docdb/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *DocDB { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "rds" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *DocDB { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *DocDB { svc := &DocDB{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2014-10-31", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go index edcb5b8598e..0400da631df 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go @@ -48,11 +48,11 @@ const ( // svc := dynamodb.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *DynamoDB { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *DynamoDB { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *DynamoDB { svc := &DynamoDB{ Client: client.New( cfg, @@ -61,6 +61,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2012-08-10", JSONVersion: "1.0", diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go index 8c69b86d27d..3df136f884d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go @@ -29541,16 +29541,24 @@ func (c *EC2) RegisterImageRequest(input *RegisterImageInput) (req *request.Requ // You can't register an image where a secondary (non-root) snapshot has AWS // Marketplace product codes. // -// Some Linux distributions, such as Red Hat Enterprise Linux (RHEL) and SUSE -// Linux Enterprise Server (SLES), use the EC2 billing product code associated -// with an AMI to verify the subscription status for package updates. Creating -// an AMI from an EBS snapshot does not maintain this billing code, and instances -// launched from such an AMI are not able to connect to package update infrastructure. -// If you purchase a Reserved Instance offering for one of these Linux distributions -// and launch instances using an AMI that does not contain the required billing -// code, your Reserved Instance is not applied to these instances. +// Windows and some Linux distributions, such as Red Hat Enterprise Linux (RHEL) +// and SUSE Linux Enterprise Server (SLES), use the EC2 billing product code +// associated with an AMI to verify the subscription status for package updates. 
+// To create a new AMI for operating systems that require a billing product +// code, do the following: // -// To create an AMI for operating systems that require a billing code, see CreateImage. +// Launch an instance from an existing AMI with that billing product code. +// +// Customize the instance. +// +// Create a new AMI from the instance using CreateImage to preserve the billing +// product code association. +// +// If you purchase a Reserved Instance to apply to an On-Demand Instance that +// was launched from an AMI with a billing product code, make sure that the +// Reserved Instance has the matching billing product code. If you purchase +// a Reserved Instance without the matching billing product code, the Reserved +// Instance will not be applied to the On-Demand Instance. // // If needed, you can deregister an AMI at any time. Any modifications you make // to an AMI backed by an instance store volume invalidates its registration. @@ -40765,6 +40773,9 @@ type CreateFpgaImageInput struct { // A name for the AFI. Name *string `type:"string"` + + // The tags to apply to the FPGA image during creation. + TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` } // String returns the string representation @@ -40826,6 +40837,12 @@ func (s *CreateFpgaImageInput) SetName(v string) *CreateFpgaImageInput { return s } +// SetTagSpecifications sets the TagSpecifications field's value. +func (s *CreateFpgaImageInput) SetTagSpecifications(v []*TagSpecification) *CreateFpgaImageInput { + s.TagSpecifications = v + return s +} + type CreateFpgaImageOutput struct { _ struct{} `type:"structure"` @@ -90564,10 +90581,11 @@ type TagSpecification struct { // The type of resource to tag. 
Currently, the resource types that support tagging // on creation are: capacity-reservation | client-vpn-endpoint | dedicated-host - // | fleet | instance | launch-template | snapshot | transit-gateway | transit-gateway-attachment + // | fleet | fpga-image | instance | launch-template | snapshot | traffic-mirror-filter + // | traffic-mirror-session | traffic-mirror-target | transit-gateway | transit-gateway-attachment // | transit-gateway-route-table | volume. // - // To tag a resource after it has been created, see CreateTags. + // To tag a resource after it has been created, see CreateTags (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateTags.html). ResourceType *string `locationName:"resourceType" type:"string" enum:"ResourceType"` // The tags to apply to the resource. diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/service.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/service.go index 6acbc43fe3d..b2b9fb8c564 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/service.go @@ -46,11 +46,11 @@ const ( // svc := ec2.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2 { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *EC2 { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *EC2 { svc := &EC2{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2016-11-15", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go b/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go index 237899c6061..74ca7bb8e2e 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go @@ -442,7 +442,10 @@ func (c *ECR) CreateRepositoryRequest(input *CreateRepositoryInput) (req *reques // CreateRepository API operation for Amazon EC2 Container Registry. // -// Creates an image repository. +// Creates an Amazon Elastic Container Registry (Amazon ECR) repository, where +// users can push and pull Docker images. For more information, see Amazon ECR +// Repositories (https://docs.aws.amazon.com/AmazonECR/latest/userguide/Repositories.html) +// in the Amazon Elastic Container Registry User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -772,6 +775,156 @@ func (c *ECR) DeleteRepositoryPolicyWithContext(ctx aws.Context, input *DeleteRe return out, req.Send() } +const opDescribeImageScanFindings = "DescribeImageScanFindings" + +// DescribeImageScanFindingsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeImageScanFindings operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeImageScanFindings for more information on using the DescribeImageScanFindings +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeImageScanFindingsRequest method. +// req, resp := client.DescribeImageScanFindingsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DescribeImageScanFindings +func (c *ECR) DescribeImageScanFindingsRequest(input *DescribeImageScanFindingsInput) (req *request.Request, output *DescribeImageScanFindingsOutput) { + op := &request.Operation{ + Name: opDescribeImageScanFindings, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeImageScanFindingsInput{} + } + + output = &DescribeImageScanFindingsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeImageScanFindings API operation for Amazon EC2 Container Registry. +// +// Describes the image scan findings for the specified image. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation DescribeImageScanFindings for usage and error information. 
+// +// Returned Error Codes: +// * ErrCodeServerException "ServerException" +// These errors are usually caused by a server-side issue. +// +// * ErrCodeInvalidParameterException "InvalidParameterException" +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException" +// The specified repository could not be found. Check the spelling of the specified +// repository and ensure that you are performing operations on the correct registry. +// +// * ErrCodeImageNotFoundException "ImageNotFoundException" +// The image requested does not exist in the specified repository. +// +// * ErrCodeScanNotFoundException "ScanNotFoundException" +// The specified image scan could not be found. Ensure that image scanning is +// enabled on the repository and try again. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/DescribeImageScanFindings +func (c *ECR) DescribeImageScanFindings(input *DescribeImageScanFindingsInput) (*DescribeImageScanFindingsOutput, error) { + req, out := c.DescribeImageScanFindingsRequest(input) + return out, req.Send() +} + +// DescribeImageScanFindingsWithContext is the same as DescribeImageScanFindings with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeImageScanFindings for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) DescribeImageScanFindingsWithContext(ctx aws.Context, input *DescribeImageScanFindingsInput, opts ...request.Option) (*DescribeImageScanFindingsOutput, error) { + req, out := c.DescribeImageScanFindingsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +// DescribeImageScanFindingsPages iterates over the pages of a DescribeImageScanFindings operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeImageScanFindings method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeImageScanFindings operation. +// pageNum := 0 +// err := client.DescribeImageScanFindingsPages(params, +// func(page *ecr.DescribeImageScanFindingsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ECR) DescribeImageScanFindingsPages(input *DescribeImageScanFindingsInput, fn func(*DescribeImageScanFindingsOutput, bool) bool) error { + return c.DescribeImageScanFindingsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeImageScanFindingsPagesWithContext same as DescribeImageScanFindingsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) DescribeImageScanFindingsPagesWithContext(ctx aws.Context, input *DescribeImageScanFindingsInput, fn func(*DescribeImageScanFindingsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeImageScanFindingsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeImageScanFindingsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*DescribeImageScanFindingsOutput), !p.HasNextPage()) + } + return p.Err() +} + const opDescribeImages = "DescribeImages" // DescribeImagesRequest generates a "aws/request.Request" representing the @@ -1378,6 +1531,12 @@ func (c *ECR) GetLifecyclePolicyPreviewRequest(input *GetLifecyclePolicyPreviewI Name: opGetLifecyclePolicyPreview, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, } if input == nil { @@ -1437,6 +1596,56 @@ func (c *ECR) GetLifecyclePolicyPreviewWithContext(ctx aws.Context, input *GetLi return out, req.Send() } +// GetLifecyclePolicyPreviewPages iterates over the pages of a GetLifecyclePolicyPreview operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetLifecyclePolicyPreview method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetLifecyclePolicyPreview operation. +// pageNum := 0 +// err := client.GetLifecyclePolicyPreviewPages(params, +// func(page *ecr.GetLifecyclePolicyPreviewOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *ECR) GetLifecyclePolicyPreviewPages(input *GetLifecyclePolicyPreviewInput, fn func(*GetLifecyclePolicyPreviewOutput, bool) bool) error { + return c.GetLifecyclePolicyPreviewPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// GetLifecyclePolicyPreviewPagesWithContext same as GetLifecyclePolicyPreviewPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) GetLifecyclePolicyPreviewPagesWithContext(ctx aws.Context, input *GetLifecyclePolicyPreviewInput, fn func(*GetLifecyclePolicyPreviewOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *GetLifecyclePolicyPreviewInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetLifecyclePolicyPreviewRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*GetLifecyclePolicyPreviewOutput), !p.HasNextPage()) + } + return p.Err() +} + const opGetRepositoryPolicy = "GetRepositoryPolicy" // GetRepositoryPolicyRequest generates a "aws/request.Request" representing the @@ -1964,6 +2173,93 @@ func (c *ECR) PutImageWithContext(ctx aws.Context, input *PutImageInput, opts .. return out, req.Send() } +const opPutImageScanningConfiguration = "PutImageScanningConfiguration" + +// PutImageScanningConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutImageScanningConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutImageScanningConfiguration for more information on using the PutImageScanningConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the PutImageScanningConfigurationRequest method. +// req, resp := client.PutImageScanningConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutImageScanningConfiguration +func (c *ECR) PutImageScanningConfigurationRequest(input *PutImageScanningConfigurationInput) (req *request.Request, output *PutImageScanningConfigurationOutput) { + op := &request.Operation{ + Name: opPutImageScanningConfiguration, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutImageScanningConfigurationInput{} + } + + output = &PutImageScanningConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutImageScanningConfiguration API operation for Amazon EC2 Container Registry. +// +// Updates the image scanning configuration for a repository. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation PutImageScanningConfiguration for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServerException "ServerException" +// These errors are usually caused by a server-side issue. +// +// * ErrCodeInvalidParameterException "InvalidParameterException" +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException" +// The specified repository could not be found. Check the spelling of the specified +// repository and ensure that you are performing operations on the correct registry. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/PutImageScanningConfiguration +func (c *ECR) PutImageScanningConfiguration(input *PutImageScanningConfigurationInput) (*PutImageScanningConfigurationOutput, error) { + req, out := c.PutImageScanningConfigurationRequest(input) + return out, req.Send() +} + +// PutImageScanningConfigurationWithContext is the same as PutImageScanningConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutImageScanningConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ECR) PutImageScanningConfigurationWithContext(ctx aws.Context, input *PutImageScanningConfigurationInput, opts ...request.Option) (*PutImageScanningConfigurationOutput, error) { + req, out := c.PutImageScanningConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opPutImageTagMutability = "PutImageTagMutability" // PutImageTagMutabilityRequest generates a "aws/request.Request" representing the @@ -2228,59 +2524,58 @@ func (c *ECR) SetRepositoryPolicyWithContext(ctx aws.Context, input *SetReposito return out, req.Send() } -const opStartLifecyclePolicyPreview = "StartLifecyclePolicyPreview" +const opStartImageScan = "StartImageScan" -// StartLifecyclePolicyPreviewRequest generates a "aws/request.Request" representing the -// client's request for the StartLifecyclePolicyPreview operation. The "output" return +// StartImageScanRequest generates a "aws/request.Request" representing the +// client's request for the StartImageScan operation. 
The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See StartLifecyclePolicyPreview for more information on using the StartLifecyclePolicyPreview +// See StartImageScan for more information on using the StartImageScan // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the StartLifecyclePolicyPreviewRequest method. -// req, resp := client.StartLifecyclePolicyPreviewRequest(params) +// // Example sending a request using the StartImageScanRequest method. +// req, resp := client.StartImageScanRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/StartLifecyclePolicyPreview -func (c *ECR) StartLifecyclePolicyPreviewRequest(input *StartLifecyclePolicyPreviewInput) (req *request.Request, output *StartLifecyclePolicyPreviewOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/StartImageScan +func (c *ECR) StartImageScanRequest(input *StartImageScanInput) (req *request.Request, output *StartImageScanOutput) { op := &request.Operation{ - Name: opStartLifecyclePolicyPreview, + Name: opStartImageScan, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &StartLifecyclePolicyPreviewInput{} + input = &StartImageScanInput{} } - output = &StartLifecyclePolicyPreviewOutput{} + output = &StartImageScanOutput{} req = c.newRequest(op, input, output) return } -// StartLifecyclePolicyPreview API operation for Amazon EC2 Container Registry. +// StartImageScan API operation for Amazon EC2 Container Registry. 
// -// Starts a preview of the specified lifecycle policy. This allows you to see -// the results before creating the lifecycle policy. +// Starts an image vulnerability scan. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon EC2 Container Registry's -// API operation StartLifecyclePolicyPreview for usage and error information. +// API operation StartImageScan for usage and error information. // // Returned Error Codes: // * ErrCodeServerException "ServerException" @@ -2294,36 +2589,127 @@ func (c *ECR) StartLifecyclePolicyPreviewRequest(input *StartLifecyclePolicyPrev // The specified repository could not be found. Check the spelling of the specified // repository and ensure that you are performing operations on the correct registry. // -// * ErrCodeLifecyclePolicyNotFoundException "LifecyclePolicyNotFoundException" -// The lifecycle policy could not be found, and no policy is set to the repository. -// -// * ErrCodeLifecyclePolicyPreviewInProgressException "LifecyclePolicyPreviewInProgressException" -// The previous lifecycle policy preview request has not completed. Please try -// again later. +// * ErrCodeImageNotFoundException "ImageNotFoundException" +// The image requested does not exist in the specified repository. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/StartLifecyclePolicyPreview -func (c *ECR) StartLifecyclePolicyPreview(input *StartLifecyclePolicyPreviewInput) (*StartLifecyclePolicyPreviewOutput, error) { - req, out := c.StartLifecyclePolicyPreviewRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/StartImageScan +func (c *ECR) StartImageScan(input *StartImageScanInput) (*StartImageScanOutput, error) { + req, out := c.StartImageScanRequest(input) return out, req.Send() } -// StartLifecyclePolicyPreviewWithContext is the same as StartLifecyclePolicyPreview with the addition of +// StartImageScanWithContext is the same as StartImageScan with the addition of // the ability to pass a context and additional request options. // -// See StartLifecyclePolicyPreview for details on how to use this API operation. +// See StartImageScan for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *ECR) StartLifecyclePolicyPreviewWithContext(ctx aws.Context, input *StartLifecyclePolicyPreviewInput, opts ...request.Option) (*StartLifecyclePolicyPreviewOutput, error) { - req, out := c.StartLifecyclePolicyPreviewRequest(input) +func (c *ECR) StartImageScanWithContext(ctx aws.Context, input *StartImageScanInput, opts ...request.Option) (*StartImageScanOutput, error) { + req, out := c.StartImageScanRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opTagResource = "TagResource" +const opStartLifecyclePolicyPreview = "StartLifecyclePolicyPreview" + +// StartLifecyclePolicyPreviewRequest generates a "aws/request.Request" representing the +// client's request for the StartLifecyclePolicyPreview operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartLifecyclePolicyPreview for more information on using the StartLifecyclePolicyPreview +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StartLifecyclePolicyPreviewRequest method. +// req, resp := client.StartLifecyclePolicyPreviewRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/StartLifecyclePolicyPreview +func (c *ECR) StartLifecyclePolicyPreviewRequest(input *StartLifecyclePolicyPreviewInput) (req *request.Request, output *StartLifecyclePolicyPreviewOutput) { + op := &request.Operation{ + Name: opStartLifecyclePolicyPreview, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartLifecyclePolicyPreviewInput{} + } + + output = &StartLifecyclePolicyPreviewOutput{} + req = c.newRequest(op, input, output) + return +} + +// StartLifecyclePolicyPreview API operation for Amazon EC2 Container Registry. +// +// Starts a preview of the specified lifecycle policy. This allows you to see +// the results before creating the lifecycle policy. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon EC2 Container Registry's +// API operation StartLifecyclePolicyPreview for usage and error information. 
+// +// Returned Error Codes: +// * ErrCodeServerException "ServerException" +// These errors are usually caused by a server-side issue. +// +// * ErrCodeInvalidParameterException "InvalidParameterException" +// The specified parameter is invalid. Review the available parameters for the +// API request. +// +// * ErrCodeRepositoryNotFoundException "RepositoryNotFoundException" +// The specified repository could not be found. Check the spelling of the specified +// repository and ensure that you are performing operations on the correct registry. +// +// * ErrCodeLifecyclePolicyNotFoundException "LifecyclePolicyNotFoundException" +// The lifecycle policy could not be found, and no policy is set to the repository. +// +// * ErrCodeLifecyclePolicyPreviewInProgressException "LifecyclePolicyPreviewInProgressException" +// The previous lifecycle policy preview request has not completed. Please try +// again later. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/ecr-2015-09-21/StartLifecyclePolicyPreview +func (c *ECR) StartLifecyclePolicyPreview(input *StartLifecyclePolicyPreviewInput) (*StartLifecyclePolicyPreviewOutput, error) { + req, out := c.StartLifecyclePolicyPreviewRequest(input) + return out, req.Send() +} + +// StartLifecyclePolicyPreviewWithContext is the same as StartLifecyclePolicyPreview with the addition of +// the ability to pass a context and additional request options. +// +// See StartLifecyclePolicyPreview for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *ECR) StartLifecyclePolicyPreviewWithContext(ctx aws.Context, input *StartLifecyclePolicyPreviewInput, opts ...request.Option) (*StartLifecyclePolicyPreviewOutput, error) { + req, out := c.StartLifecyclePolicyPreviewRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opTagResource = "TagResource" // TagResourceRequest generates a "aws/request.Request" representing the // client's request for the TagResource operation. The "output" return @@ -2623,6 +3009,41 @@ func (c *ECR) UploadLayerPartWithContext(ctx aws.Context, input *UploadLayerPart return out, req.Send() } +// This data type is used in the ImageScanFinding data type. +type Attribute struct { + _ struct{} `type:"structure"` + + // The attribute key. + // + // Key is a required field + Key *string `locationName:"key" min:"1" type:"string" required:"true"` + + // The value assigned to the attribute key. + Value *string `locationName:"value" min:"1" type:"string"` +} + +// String returns the string representation +func (s Attribute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Attribute) GoString() string { + return s.String() +} + +// SetKey sets the Key field's value. +func (s *Attribute) SetKey(v string) *Attribute { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Attribute) SetValue(v string) *Attribute { + s.Value = &v + return s +} + // An object representing authorization data for an Amazon ECR registry. type AuthorizationData struct { _ struct{} `type:"structure"` @@ -3146,6 +3567,11 @@ func (s *CompleteLayerUploadOutput) SetUploadId(v string) *CompleteLayerUploadOu type CreateRepositoryInput struct { _ struct{} `type:"structure"` + // The image scanning configuration for the repository. This setting determines + // whether images are scanned for known vulnerabilities after being pushed to + // the repository. 
+ ImageScanningConfiguration *ImageScanningConfiguration `locationName:"imageScanningConfiguration" type:"structure"` + // The tag mutability setting for the repository. If this parameter is omitted, // the default setting of MUTABLE will be used which will allow image tags to // be overwritten. If IMMUTABLE is specified, all image tags within the repository @@ -3192,6 +3618,12 @@ func (s *CreateRepositoryInput) Validate() error { return nil } +// SetImageScanningConfiguration sets the ImageScanningConfiguration field's value. +func (s *CreateRepositoryInput) SetImageScanningConfiguration(v *ImageScanningConfiguration) *CreateRepositoryInput { + s.ImageScanningConfiguration = v + return s +} + // SetImageTagMutability sets the ImageTagMutability field's value. func (s *CreateRepositoryInput) SetImageTagMutability(v string) *CreateRepositoryInput { s.ImageTagMutability = &v @@ -3511,6 +3943,180 @@ func (s *DeleteRepositoryPolicyOutput) SetRepositoryName(v string) *DeleteReposi return s } +type DescribeImageScanFindingsInput struct { + _ struct{} `type:"structure"` + + // An object with identifying information for an Amazon ECR image. + // + // ImageId is a required field + ImageId *ImageIdentifier `locationName:"imageId" type:"structure" required:"true"` + + // The maximum number of image scan results returned by DescribeImageScanFindings + // in paginated output. When this parameter is used, DescribeImageScanFindings + // only returns maxResults results in a single page along with a nextToken response + // element. The remaining results of the initial request can be seen by sending + // another DescribeImageScanFindings request with the returned nextToken value. + // This value can be between 1 and 1000. If this parameter is not used, then + // DescribeImageScanFindings returns up to 100 results and a nextToken value, + // if applicable. 
+ MaxResults *int64 `locationName:"maxResults" min:"1" type:"integer"` + + // The nextToken value returned from a previous paginated DescribeImageScanFindings + // request where maxResults was used and the results exceeded the value of that + // parameter. Pagination continues from the end of the previous results that + // returned the nextToken value. This value is null when there are no more results + // to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The AWS account ID associated with the registry that contains the repository + // in which to describe the image scan findings for. If you do not specify a + // registry, the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository for the image for which to describe the scan findings. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeImageScanFindingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImageScanFindingsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeImageScanFindingsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeImageScanFindingsInput"} + if s.ImageId == nil { + invalidParams.Add(request.NewErrParamRequired("ImageId")) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + if s.ImageId != nil { + if err := s.ImageId.Validate(); err != nil { + invalidParams.AddNested("ImageId", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetImageId sets the ImageId field's value. +func (s *DescribeImageScanFindingsInput) SetImageId(v *ImageIdentifier) *DescribeImageScanFindingsInput { + s.ImageId = v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *DescribeImageScanFindingsInput) SetMaxResults(v int64) *DescribeImageScanFindingsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *DescribeImageScanFindingsInput) SetNextToken(v string) *DescribeImageScanFindingsInput { + s.NextToken = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *DescribeImageScanFindingsInput) SetRegistryId(v string) *DescribeImageScanFindingsInput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *DescribeImageScanFindingsInput) SetRepositoryName(v string) *DescribeImageScanFindingsInput { + s.RepositoryName = &v + return s +} + +type DescribeImageScanFindingsOutput struct { + _ struct{} `type:"structure"` + + // An object with identifying information for an Amazon ECR image. 
+ ImageId *ImageIdentifier `locationName:"imageId" type:"structure"` + + // The information contained in the image scan findings. + ImageScanFindings *ImageScanFindings `locationName:"imageScanFindings" type:"structure"` + + // The current state of the scan. + ImageScanStatus *ImageScanStatus `locationName:"imageScanStatus" type:"structure"` + + // The nextToken value to include in a future DescribeImageScanFindings request. + // When the results of a DescribeImageScanFindings request exceed maxResults, + // this value can be used to retrieve the next page of results. This value is + // null when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` +} + +// String returns the string representation +func (s DescribeImageScanFindingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeImageScanFindingsOutput) GoString() string { + return s.String() +} + +// SetImageId sets the ImageId field's value. +func (s *DescribeImageScanFindingsOutput) SetImageId(v *ImageIdentifier) *DescribeImageScanFindingsOutput { + s.ImageId = v + return s +} + +// SetImageScanFindings sets the ImageScanFindings field's value. +func (s *DescribeImageScanFindingsOutput) SetImageScanFindings(v *ImageScanFindings) *DescribeImageScanFindingsOutput { + s.ImageScanFindings = v + return s +} + +// SetImageScanStatus sets the ImageScanStatus field's value. +func (s *DescribeImageScanFindingsOutput) SetImageScanStatus(v *ImageScanStatus) *DescribeImageScanFindingsOutput { + s.ImageScanStatus = v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *DescribeImageScanFindingsOutput) SetNextToken(v string) *DescribeImageScanFindingsOutput { + s.NextToken = &v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *DescribeImageScanFindingsOutput) SetRegistryId(v string) *DescribeImageScanFindingsOutput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *DescribeImageScanFindingsOutput) SetRepositoryName(v string) *DescribeImageScanFindingsOutput { + s.RepositoryName = &v + return s +} + // An object representing a filter on a DescribeImages operation. type DescribeImagesFilter struct { _ struct{} `type:"structure"` @@ -4361,7 +4967,7 @@ type Image struct { ImageId *ImageIdentifier `locationName:"imageId" type:"structure"` // The image manifest associated with the image. - ImageManifest *string `locationName:"imageManifest" type:"string"` + ImageManifest *string `locationName:"imageManifest" min:"1" type:"string"` // The AWS account ID associated with the registry containing the image. RegistryId *string `locationName:"registryId" type:"string"` @@ -4415,6 +5021,12 @@ type ImageDetail struct { // the current image was pushed to the repository. ImagePushedAt *time.Time `locationName:"imagePushedAt" type:"timestamp"` + // A summary of the last completed image scan. + ImageScanFindingsSummary *ImageScanFindingsSummary `locationName:"imageScanFindingsSummary" type:"structure"` + + // The current state of the scan. + ImageScanStatus *ImageScanStatus `locationName:"imageScanStatus" type:"structure"` + // The size, in bytes, of the image in the repository. // // Beginning with Docker version 1.9, the Docker client compresses image layers @@ -4455,13 +5067,25 @@ func (s *ImageDetail) SetImagePushedAt(v time.Time) *ImageDetail { return s } -// SetImageSizeInBytes sets the ImageSizeInBytes field's value. 
-func (s *ImageDetail) SetImageSizeInBytes(v int64) *ImageDetail { - s.ImageSizeInBytes = &v +// SetImageScanFindingsSummary sets the ImageScanFindingsSummary field's value. +func (s *ImageDetail) SetImageScanFindingsSummary(v *ImageScanFindingsSummary) *ImageDetail { + s.ImageScanFindingsSummary = v return s } -// SetImageTags sets the ImageTags field's value. +// SetImageScanStatus sets the ImageScanStatus field's value. +func (s *ImageDetail) SetImageScanStatus(v *ImageScanStatus) *ImageDetail { + s.ImageScanStatus = v + return s +} + +// SetImageSizeInBytes sets the ImageSizeInBytes field's value. +func (s *ImageDetail) SetImageSizeInBytes(v int64) *ImageDetail { + s.ImageSizeInBytes = &v + return s +} + +// SetImageTags sets the ImageTags field's value. func (s *ImageDetail) SetImageTags(v []*string) *ImageDetail { s.ImageTags = v return s @@ -4567,6 +5191,220 @@ func (s *ImageIdentifier) SetImageTag(v string) *ImageIdentifier { return s } +// Contains information about an image scan finding. +type ImageScanFinding struct { + _ struct{} `type:"structure"` + + // A collection of attributes of the host from which the finding is generated. + Attributes []*Attribute `locationName:"attributes" type:"list"` + + // The description of the finding. + Description *string `locationName:"description" type:"string"` + + // The name associated with the finding, usually a CVE number. + Name *string `locationName:"name" type:"string"` + + // The finding severity. + Severity *string `locationName:"severity" type:"string" enum:"FindingSeverity"` + + // A link containing additional details about the security vulnerability. + Uri *string `locationName:"uri" type:"string"` +} + +// String returns the string representation +func (s ImageScanFinding) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImageScanFinding) GoString() string { + return s.String() +} + +// SetAttributes sets the Attributes field's value. 
+func (s *ImageScanFinding) SetAttributes(v []*Attribute) *ImageScanFinding { + s.Attributes = v + return s +} + +// SetDescription sets the Description field's value. +func (s *ImageScanFinding) SetDescription(v string) *ImageScanFinding { + s.Description = &v + return s +} + +// SetName sets the Name field's value. +func (s *ImageScanFinding) SetName(v string) *ImageScanFinding { + s.Name = &v + return s +} + +// SetSeverity sets the Severity field's value. +func (s *ImageScanFinding) SetSeverity(v string) *ImageScanFinding { + s.Severity = &v + return s +} + +// SetUri sets the Uri field's value. +func (s *ImageScanFinding) SetUri(v string) *ImageScanFinding { + s.Uri = &v + return s +} + +// The details of an image scan. +type ImageScanFindings struct { + _ struct{} `type:"structure"` + + // The image vulnerability counts, sorted by severity. + FindingSeverityCounts map[string]*int64 `locationName:"findingSeverityCounts" type:"map"` + + // The findings from the image scan. + Findings []*ImageScanFinding `locationName:"findings" type:"list"` + + // The time of the last completed image scan. + ImageScanCompletedAt *time.Time `locationName:"imageScanCompletedAt" type:"timestamp"` + + // The time when the vulnerability data was last scanned. + VulnerabilitySourceUpdatedAt *time.Time `locationName:"vulnerabilitySourceUpdatedAt" type:"timestamp"` +} + +// String returns the string representation +func (s ImageScanFindings) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImageScanFindings) GoString() string { + return s.String() +} + +// SetFindingSeverityCounts sets the FindingSeverityCounts field's value. +func (s *ImageScanFindings) SetFindingSeverityCounts(v map[string]*int64) *ImageScanFindings { + s.FindingSeverityCounts = v + return s +} + +// SetFindings sets the Findings field's value. 
+func (s *ImageScanFindings) SetFindings(v []*ImageScanFinding) *ImageScanFindings { + s.Findings = v + return s +} + +// SetImageScanCompletedAt sets the ImageScanCompletedAt field's value. +func (s *ImageScanFindings) SetImageScanCompletedAt(v time.Time) *ImageScanFindings { + s.ImageScanCompletedAt = &v + return s +} + +// SetVulnerabilitySourceUpdatedAt sets the VulnerabilitySourceUpdatedAt field's value. +func (s *ImageScanFindings) SetVulnerabilitySourceUpdatedAt(v time.Time) *ImageScanFindings { + s.VulnerabilitySourceUpdatedAt = &v + return s +} + +// A summary of the last completed image scan. +type ImageScanFindingsSummary struct { + _ struct{} `type:"structure"` + + // The image vulnerability counts, sorted by severity. + FindingSeverityCounts map[string]*int64 `locationName:"findingSeverityCounts" type:"map"` + + // The time of the last completed image scan. + ImageScanCompletedAt *time.Time `locationName:"imageScanCompletedAt" type:"timestamp"` + + // The time when the vulnerability data was last scanned. + VulnerabilitySourceUpdatedAt *time.Time `locationName:"vulnerabilitySourceUpdatedAt" type:"timestamp"` +} + +// String returns the string representation +func (s ImageScanFindingsSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImageScanFindingsSummary) GoString() string { + return s.String() +} + +// SetFindingSeverityCounts sets the FindingSeverityCounts field's value. +func (s *ImageScanFindingsSummary) SetFindingSeverityCounts(v map[string]*int64) *ImageScanFindingsSummary { + s.FindingSeverityCounts = v + return s +} + +// SetImageScanCompletedAt sets the ImageScanCompletedAt field's value. +func (s *ImageScanFindingsSummary) SetImageScanCompletedAt(v time.Time) *ImageScanFindingsSummary { + s.ImageScanCompletedAt = &v + return s +} + +// SetVulnerabilitySourceUpdatedAt sets the VulnerabilitySourceUpdatedAt field's value. 
+func (s *ImageScanFindingsSummary) SetVulnerabilitySourceUpdatedAt(v time.Time) *ImageScanFindingsSummary { + s.VulnerabilitySourceUpdatedAt = &v + return s +} + +// The current status of an image scan. +type ImageScanStatus struct { + _ struct{} `type:"structure"` + + // The description of the image scan status. + Description *string `locationName:"description" type:"string"` + + // The current state of an image scan. + Status *string `locationName:"status" type:"string" enum:"ScanStatus"` +} + +// String returns the string representation +func (s ImageScanStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImageScanStatus) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. +func (s *ImageScanStatus) SetDescription(v string) *ImageScanStatus { + s.Description = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *ImageScanStatus) SetStatus(v string) *ImageScanStatus { + s.Status = &v + return s +} + +// The image scanning configuration for a repository. +type ImageScanningConfiguration struct { + _ struct{} `type:"structure"` + + // The setting that determines whether images are scanned after being pushed + // to a repository. If set to true, images will be scanned after being pushed. + // If this parameter is not specified, it will default to false and images will + // not be scanned unless a scan is manually started with the StartImageScan + // API. + ScanOnPush *bool `locationName:"scanOnPush" type:"boolean"` +} + +// String returns the string representation +func (s ImageScanningConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImageScanningConfiguration) GoString() string { + return s.String() +} + +// SetScanOnPush sets the ScanOnPush field's value. 
+func (s *ImageScanningConfiguration) SetScanOnPush(v bool) *ImageScanningConfiguration { + s.ScanOnPush = &v + return s +} + type InitiateLayerUploadInput struct { _ struct{} `type:"structure"` @@ -5101,7 +5939,7 @@ type PutImageInput struct { // The image manifest corresponding to the image to be uploaded. // // ImageManifest is a required field - ImageManifest *string `locationName:"imageManifest" type:"string" required:"true"` + ImageManifest *string `locationName:"imageManifest" min:"1" type:"string" required:"true"` // The tag to associate with the image. This parameter is required for images // that use the Docker Image Manifest V2 Schema 2 or OCI formats. @@ -5134,6 +5972,9 @@ func (s *PutImageInput) Validate() error { if s.ImageManifest == nil { invalidParams.Add(request.NewErrParamRequired("ImageManifest")) } + if s.ImageManifest != nil && len(*s.ImageManifest) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ImageManifest", 1)) + } if s.ImageTag != nil && len(*s.ImageTag) < 1 { invalidParams.Add(request.NewErrParamMinLen("ImageTag", 1)) } @@ -5197,6 +6038,116 @@ func (s *PutImageOutput) SetImage(v *Image) *PutImageOutput { return s } +type PutImageScanningConfigurationInput struct { + _ struct{} `type:"structure"` + + // The image scanning configuration for the repository. This setting determines + // whether images are scanned for known vulnerabilities after being pushed to + // the repository. + // + // ImageScanningConfiguration is a required field + ImageScanningConfiguration *ImageScanningConfiguration `locationName:"imageScanningConfiguration" type:"structure" required:"true"` + + // The AWS account ID associated with the registry that contains the repository + // in which to update the image scanning configuration setting. If you do not + // specify a registry, the default registry is assumed. 
+ RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository in which to update the image scanning configuration + // setting. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutImageScanningConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutImageScanningConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutImageScanningConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutImageScanningConfigurationInput"} + if s.ImageScanningConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("ImageScanningConfiguration")) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetImageScanningConfiguration sets the ImageScanningConfiguration field's value. +func (s *PutImageScanningConfigurationInput) SetImageScanningConfiguration(v *ImageScanningConfiguration) *PutImageScanningConfigurationInput { + s.ImageScanningConfiguration = v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *PutImageScanningConfigurationInput) SetRegistryId(v string) *PutImageScanningConfigurationInput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. 
+func (s *PutImageScanningConfigurationInput) SetRepositoryName(v string) *PutImageScanningConfigurationInput { + s.RepositoryName = &v + return s +} + +type PutImageScanningConfigurationOutput struct { + _ struct{} `type:"structure"` + + // The image scanning configuration setting for the repository. + ImageScanningConfiguration *ImageScanningConfiguration `locationName:"imageScanningConfiguration" type:"structure"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` +} + +// String returns the string representation +func (s PutImageScanningConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutImageScanningConfigurationOutput) GoString() string { + return s.String() +} + +// SetImageScanningConfiguration sets the ImageScanningConfiguration field's value. +func (s *PutImageScanningConfigurationOutput) SetImageScanningConfiguration(v *ImageScanningConfiguration) *PutImageScanningConfigurationOutput { + s.ImageScanningConfiguration = v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *PutImageScanningConfigurationOutput) SetRegistryId(v string) *PutImageScanningConfigurationOutput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *PutImageScanningConfigurationOutput) SetRepositoryName(v string) *PutImageScanningConfigurationOutput { + s.RepositoryName = &v + return s +} + type PutImageTagMutabilityInput struct { _ struct{} `type:"structure"` @@ -5422,6 +6373,9 @@ type Repository struct { // The date and time, in JavaScript date format, when the repository was created. CreatedAt *time.Time `locationName:"createdAt" type:"timestamp"` + // The image scanning configuration for a repository. 
+ ImageScanningConfiguration *ImageScanningConfiguration `locationName:"imageScanningConfiguration" type:"structure"` + // The tag mutability setting for the repository. ImageTagMutability *string `locationName:"imageTagMutability" type:"string" enum:"ImageTagMutability"` @@ -5458,6 +6412,12 @@ func (s *Repository) SetCreatedAt(v time.Time) *Repository { return s } +// SetImageScanningConfiguration sets the ImageScanningConfiguration field's value. +func (s *Repository) SetImageScanningConfiguration(v *ImageScanningConfiguration) *Repository { + s.ImageScanningConfiguration = v + return s +} + // SetImageTagMutability sets the ImageTagMutability field's value. func (s *Repository) SetImageTagMutability(v string) *Repository { s.ImageTagMutability = &v @@ -5607,6 +6567,127 @@ func (s *SetRepositoryPolicyOutput) SetRepositoryName(v string) *SetRepositoryPo return s } +type StartImageScanInput struct { + _ struct{} `type:"structure"` + + // An object with identifying information for an Amazon ECR image. + // + // ImageId is a required field + ImageId *ImageIdentifier `locationName:"imageId" type:"structure" required:"true"` + + // The AWS account ID associated with the registry that contains the repository + // in which to start an image scan request. If you do not specify a registry, + // the default registry is assumed. + RegistryId *string `locationName:"registryId" type:"string"` + + // The name of the repository that contains the images to scan. + // + // RepositoryName is a required field + RepositoryName *string `locationName:"repositoryName" min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s StartImageScanInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartImageScanInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *StartImageScanInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartImageScanInput"} + if s.ImageId == nil { + invalidParams.Add(request.NewErrParamRequired("ImageId")) + } + if s.RepositoryName == nil { + invalidParams.Add(request.NewErrParamRequired("RepositoryName")) + } + if s.RepositoryName != nil && len(*s.RepositoryName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RepositoryName", 2)) + } + if s.ImageId != nil { + if err := s.ImageId.Validate(); err != nil { + invalidParams.AddNested("ImageId", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetImageId sets the ImageId field's value. +func (s *StartImageScanInput) SetImageId(v *ImageIdentifier) *StartImageScanInput { + s.ImageId = v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *StartImageScanInput) SetRegistryId(v string) *StartImageScanInput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *StartImageScanInput) SetRepositoryName(v string) *StartImageScanInput { + s.RepositoryName = &v + return s +} + +type StartImageScanOutput struct { + _ struct{} `type:"structure"` + + // An object with identifying information for an Amazon ECR image. + ImageId *ImageIdentifier `locationName:"imageId" type:"structure"` + + // The current state of the scan. + ImageScanStatus *ImageScanStatus `locationName:"imageScanStatus" type:"structure"` + + // The registry ID associated with the request. + RegistryId *string `locationName:"registryId" type:"string"` + + // The repository name associated with the request. 
+ RepositoryName *string `locationName:"repositoryName" min:"2" type:"string"` +} + +// String returns the string representation +func (s StartImageScanOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartImageScanOutput) GoString() string { + return s.String() +} + +// SetImageId sets the ImageId field's value. +func (s *StartImageScanOutput) SetImageId(v *ImageIdentifier) *StartImageScanOutput { + s.ImageId = v + return s +} + +// SetImageScanStatus sets the ImageScanStatus field's value. +func (s *StartImageScanOutput) SetImageScanStatus(v *ImageScanStatus) *StartImageScanOutput { + s.ImageScanStatus = v + return s +} + +// SetRegistryId sets the RegistryId field's value. +func (s *StartImageScanOutput) SetRegistryId(v string) *StartImageScanOutput { + s.RegistryId = &v + return s +} + +// SetRepositoryName sets the RepositoryName field's value. +func (s *StartImageScanOutput) SetRepositoryName(v string) *StartImageScanOutput { + s.RepositoryName = &v + return s +} + type StartLifecyclePolicyPreviewInput struct { _ struct{} `type:"structure"` @@ -6055,6 +7136,26 @@ func (s *UploadLayerPartOutput) SetUploadId(v string) *UploadLayerPartOutput { return s } +const ( + // FindingSeverityInformational is a FindingSeverity enum value + FindingSeverityInformational = "INFORMATIONAL" + + // FindingSeverityLow is a FindingSeverity enum value + FindingSeverityLow = "LOW" + + // FindingSeverityMedium is a FindingSeverity enum value + FindingSeverityMedium = "MEDIUM" + + // FindingSeverityHigh is a FindingSeverity enum value + FindingSeverityHigh = "HIGH" + + // FindingSeverityCritical is a FindingSeverity enum value + FindingSeverityCritical = "CRITICAL" + + // FindingSeverityUndefined is a FindingSeverity enum value + FindingSeverityUndefined = "UNDEFINED" +) + const ( // ImageActionTypeExpire is a ImageActionType enum value ImageActionTypeExpire = "EXPIRE" @@ -6115,6 +7216,17 @@ const ( 
LifecyclePolicyPreviewStatusFailed = "FAILED" ) +const ( + // ScanStatusInProgress is a ScanStatus enum value + ScanStatusInProgress = "IN_PROGRESS" + + // ScanStatusComplete is a ScanStatus enum value + ScanStatusComplete = "COMPLETE" + + // ScanStatusFailed is a ScanStatus enum value + ScanStatusFailed = "FAILED" +) + const ( // TagStatusTagged is a TagStatus enum value TagStatusTagged = "TAGGED" diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecr/errors.go b/vendor/github.com/aws/aws-sdk-go/service/ecr/errors.go index c1f18605ca6..786759af0fd 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ecr/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ecr/errors.go @@ -140,6 +140,13 @@ const ( // repository policy. ErrCodeRepositoryPolicyNotFoundException = "RepositoryPolicyNotFoundException" + // ErrCodeScanNotFoundException for service response error code + // "ScanNotFoundException". + // + // The specified image scan could not be found. Ensure that image scanning is + // enabled on the repository and try again. + ErrCodeScanNotFoundException = "ScanNotFoundException" + // ErrCodeServerException for service response error code // "ServerException". // diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecr/service.go b/vendor/github.com/aws/aws-sdk-go/service/ecr/service.go index 3eba7f696b6..b1ee8a2c723 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ecr/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ecr/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *ECR { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "ecr" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ECR { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ECR { svc := &ECR{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-09-21", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecs/service.go b/vendor/github.com/aws/aws-sdk-go/service/ecs/service.go index c268614ecb9..a2eef295918 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ecs/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ecs/service.go @@ -46,11 +46,11 @@ const ( // svc := ecs.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *ECS { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ECS { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ECS { svc := &ECS{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2014-11-13", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/efs/service.go b/vendor/github.com/aws/aws-sdk-go/service/efs/service.go index 6b1a11c900a..3b6336d6224 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/efs/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/efs/service.go @@ -46,11 +46,11 @@ const ( // svc := efs.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *EFS { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *EFS { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *EFS { svc := &EFS{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-02-01", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/eks/service.go b/vendor/github.com/aws/aws-sdk-go/service/eks/service.go index a51bf5458c6..9c3ae450660 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/eks/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/eks/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *EKS { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "eks" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *EKS { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *EKS { svc := &EKS{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-11-01", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticache/api.go b/vendor/github.com/aws/aws-sdk-go/service/elasticache/api.go index 6df29f91868..c2a35cf2c31 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elasticache/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elasticache/api.go @@ -377,6 +377,91 @@ func (c *ElastiCache) BatchStopUpdateActionWithContext(ctx aws.Context, input *B return out, req.Send() } +const opCompleteMigration = "CompleteMigration" + +// CompleteMigrationRequest generates a "aws/request.Request" representing the +// client's request for the CompleteMigration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CompleteMigration for more information on using the CompleteMigration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CompleteMigrationRequest method. 
+// req, resp := client.CompleteMigrationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CompleteMigration +func (c *ElastiCache) CompleteMigrationRequest(input *CompleteMigrationInput) (req *request.Request, output *CompleteMigrationOutput) { + op := &request.Operation{ + Name: opCompleteMigration, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CompleteMigrationInput{} + } + + output = &CompleteMigrationOutput{} + req = c.newRequest(op, input, output) + return +} + +// CompleteMigration API operation for Amazon ElastiCache. +// +// Complete the migration of data. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon ElastiCache's +// API operation CompleteMigration for usage and error information. +// +// Returned Error Codes: +// * ErrCodeReplicationGroupNotFoundFault "ReplicationGroupNotFoundFault" +// The specified replication group does not exist. +// +// * ErrCodeInvalidReplicationGroupStateFault "InvalidReplicationGroupState" +// The requested replication group is not in the available state. +// +// * ErrCodeReplicationGroupNotUnderMigrationFault "ReplicationGroupNotUnderMigrationFault" +// The designated replication group is not available for data migration. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CompleteMigration +func (c *ElastiCache) CompleteMigration(input *CompleteMigrationInput) (*CompleteMigrationOutput, error) { + req, out := c.CompleteMigrationRequest(input) + return out, req.Send() +} + +// CompleteMigrationWithContext is the same as CompleteMigration with the addition of +// the ability to pass a context and additional request options. 
+// +// See CompleteMigration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElastiCache) CompleteMigrationWithContext(ctx aws.Context, input *CompleteMigrationInput, opts ...request.Option) (*CompleteMigrationOutput, error) { + req, out := c.CompleteMigrationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opCopySnapshot = "CopySnapshot" // CopySnapshotRequest generates a "aws/request.Request" representing the @@ -5264,6 +5349,94 @@ func (c *ElastiCache) RevokeCacheSecurityGroupIngressWithContext(ctx aws.Context return out, req.Send() } +const opStartMigration = "StartMigration" + +// StartMigrationRequest generates a "aws/request.Request" representing the +// client's request for the StartMigration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartMigration for more information on using the StartMigration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the StartMigrationRequest method. 
+// req, resp := client.StartMigrationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/StartMigration +func (c *ElastiCache) StartMigrationRequest(input *StartMigrationInput) (req *request.Request, output *StartMigrationOutput) { + op := &request.Operation{ + Name: opStartMigration, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &StartMigrationInput{} + } + + output = &StartMigrationOutput{} + req = c.newRequest(op, input, output) + return +} + +// StartMigration API operation for Amazon ElastiCache. +// +// Start the migration of data. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon ElastiCache's +// API operation StartMigration for usage and error information. +// +// Returned Error Codes: +// * ErrCodeReplicationGroupNotFoundFault "ReplicationGroupNotFoundFault" +// The specified replication group does not exist. +// +// * ErrCodeInvalidReplicationGroupStateFault "InvalidReplicationGroupState" +// The requested replication group is not in the available state. +// +// * ErrCodeReplicationGroupAlreadyUnderMigrationFault "ReplicationGroupAlreadyUnderMigrationFault" +// The targeted replication group is not available. +// +// * ErrCodeInvalidParameterValueException "InvalidParameterValue" +// The value for a parameter is invalid. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/StartMigration +func (c *ElastiCache) StartMigration(input *StartMigrationInput) (*StartMigrationOutput, error) { + req, out := c.StartMigrationRequest(input) + return out, req.Send() +} + +// StartMigrationWithContext is the same as StartMigration with the addition of +// the ability to pass a context and additional request options. +// +// See StartMigration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *ElastiCache) StartMigrationWithContext(ctx aws.Context, input *StartMigrationInput, opts ...request.Option) (*StartMigrationOutput, error) { + req, out := c.StartMigrationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opTestFailover = "TestFailover" // TestFailoverRequest generates a "aws/request.Request" representing the @@ -6769,6 +6942,78 @@ func (s *CacheSubnetGroup) SetVpcId(v string) *CacheSubnetGroup { return s } +type CompleteMigrationInput struct { + _ struct{} `type:"structure"` + + // Forces the migration to stop without ensuring that data is in sync. It is + // recommended to use this option only to abort the migration and not recommended + // when application wants to continue migration to ElastiCache. + Force *bool `type:"boolean"` + + // The ID of the replication group to which data is being migrated. 
+ // + // ReplicationGroupId is a required field + ReplicationGroupId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CompleteMigrationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteMigrationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CompleteMigrationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CompleteMigrationInput"} + if s.ReplicationGroupId == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationGroupId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetForce sets the Force field's value. +func (s *CompleteMigrationInput) SetForce(v bool) *CompleteMigrationInput { + s.Force = &v + return s +} + +// SetReplicationGroupId sets the ReplicationGroupId field's value. +func (s *CompleteMigrationInput) SetReplicationGroupId(v string) *CompleteMigrationInput { + s.ReplicationGroupId = &v + return s +} + +type CompleteMigrationOutput struct { + _ struct{} `type:"structure"` + + // Contains all of the attributes of a specific Redis replication group. + ReplicationGroup *ReplicationGroup `type:"structure"` +} + +// String returns the string representation +func (s CompleteMigrationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteMigrationOutput) GoString() string { + return s.String() +} + +// SetReplicationGroup sets the ReplicationGroup field's value. +func (s *CompleteMigrationOutput) SetReplicationGroup(v *ReplicationGroup) *CompleteMigrationOutput { + s.ReplicationGroup = v + return s +} + // Node group (shard) configuration options when adding or removing replicas. 
// Each node group (shard) configuration has the following members: NodeGroupId, // NewReplicaCount, and PreferredAvailabilityZones. @@ -8326,6 +8571,39 @@ func (s *CreateSnapshotOutput) SetSnapshot(v *Snapshot) *CreateSnapshotOutput { return s } +// The endpoint from which data should be migrated. +type CustomerNodeEndpoint struct { + _ struct{} `type:"structure"` + + // The address of the node endpoint + Address *string `type:"string"` + + // The port of the node endpoint + Port *int64 `type:"integer"` +} + +// String returns the string representation +func (s CustomerNodeEndpoint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CustomerNodeEndpoint) GoString() string { + return s.String() +} + +// SetAddress sets the Address field's value. +func (s *CustomerNodeEndpoint) SetAddress(v string) *CustomerNodeEndpoint { + s.Address = &v + return s +} + +// SetPort sets the Port field's value. +func (s *CustomerNodeEndpoint) SetPort(v int64) *CustomerNodeEndpoint { + s.Port = &v + return s +} + type DecreaseReplicaCountInput struct { _ struct{} `type:"structure"` @@ -14118,6 +14396,82 @@ func (s *Snapshot) SetVpcId(v string) *Snapshot { return s } +type StartMigrationInput struct { + _ struct{} `type:"structure"` + + // List of endpoints from which data should be migrated. For Redis (cluster + // mode disabled), list should have only one element. + // + // CustomerNodeEndpointList is a required field + CustomerNodeEndpointList []*CustomerNodeEndpoint `type:"list" required:"true"` + + // The ID of the replication group to which data should be migrated. 
+ // + // ReplicationGroupId is a required field + ReplicationGroupId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s StartMigrationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartMigrationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StartMigrationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartMigrationInput"} + if s.CustomerNodeEndpointList == nil { + invalidParams.Add(request.NewErrParamRequired("CustomerNodeEndpointList")) + } + if s.ReplicationGroupId == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationGroupId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCustomerNodeEndpointList sets the CustomerNodeEndpointList field's value. +func (s *StartMigrationInput) SetCustomerNodeEndpointList(v []*CustomerNodeEndpoint) *StartMigrationInput { + s.CustomerNodeEndpointList = v + return s +} + +// SetReplicationGroupId sets the ReplicationGroupId field's value. +func (s *StartMigrationInput) SetReplicationGroupId(v string) *StartMigrationInput { + s.ReplicationGroupId = &v + return s +} + +type StartMigrationOutput struct { + _ struct{} `type:"structure"` + + // Contains all of the attributes of a specific Redis replication group. + ReplicationGroup *ReplicationGroup `type:"structure"` +} + +// String returns the string representation +func (s StartMigrationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StartMigrationOutput) GoString() string { + return s.String() +} + +// SetReplicationGroup sets the ReplicationGroup field's value. 
+func (s *StartMigrationOutput) SetReplicationGroup(v *ReplicationGroup) *StartMigrationOutput { + s.ReplicationGroup = v + return s +} + // Represents the subnet associated with a cluster. This parameter refers to // subnets defined in Amazon Virtual Private Cloud (Amazon VPC) and used with // ElastiCache. diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticache/errors.go b/vendor/github.com/aws/aws-sdk-go/service/elasticache/errors.go index 8f75570b996..25579b1d5d6 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elasticache/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elasticache/errors.go @@ -235,12 +235,24 @@ const ( // The specified replication group already exists. ErrCodeReplicationGroupAlreadyExistsFault = "ReplicationGroupAlreadyExists" + // ErrCodeReplicationGroupAlreadyUnderMigrationFault for service response error code + // "ReplicationGroupAlreadyUnderMigrationFault". + // + // The targeted replication group is not available. + ErrCodeReplicationGroupAlreadyUnderMigrationFault = "ReplicationGroupAlreadyUnderMigrationFault" + // ErrCodeReplicationGroupNotFoundFault for service response error code // "ReplicationGroupNotFoundFault". // // The specified replication group does not exist. ErrCodeReplicationGroupNotFoundFault = "ReplicationGroupNotFoundFault" + // ErrCodeReplicationGroupNotUnderMigrationFault for service response error code + // "ReplicationGroupNotUnderMigrationFault". + // + // The designated replication group is not available for data migration. + ErrCodeReplicationGroupNotUnderMigrationFault = "ReplicationGroupNotUnderMigrationFault" + // ErrCodeReservedCacheNodeAlreadyExistsFault for service response error code // "ReservedCacheNodeAlreadyExists". 
// diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticache/service.go b/vendor/github.com/aws/aws-sdk-go/service/elasticache/service.go index fd5f8c51707..2ad929dca32 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elasticache/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elasticache/service.go @@ -46,11 +46,11 @@ const ( // svc := elasticache.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *ElastiCache { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ElastiCache { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ElastiCache { svc := &ElastiCache{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-02-02", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/service.go b/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/service.go index 12e8b1c819a..841587452ab 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/service.go @@ -46,11 +46,11 @@ const ( // svc := elasticbeanstalk.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *ElasticBeanstalk { c := p.ClientConfig(EndpointsID, cfgs...) 
- return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ElasticBeanstalk { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ElasticBeanstalk { svc := &ElasticBeanstalk{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2010-12-01", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/service.go b/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/service.go index d2f8f382733..9f309cdb009 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/service.go @@ -46,11 +46,11 @@ const ( // svc := elasticsearchservice.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *ElasticsearchService { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ElasticsearchService { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ElasticsearchService { svc := &ElasticsearchService{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-01-01", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/service.go b/vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/service.go index 30acb8d1bc0..9a33ddbf610 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/service.go @@ -46,11 +46,11 @@ const ( // svc := elastictranscoder.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *ElasticTranscoder { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ElasticTranscoder { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ElasticTranscoder { svc := &ElasticTranscoder{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2012-09-25", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/elb/service.go b/vendor/github.com/aws/aws-sdk-go/service/elb/service.go index 5dfdd322c9b..73e40b747c9 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elb/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elb/service.go @@ -46,11 +46,11 @@ const ( // svc := elb.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *ELB { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ELB { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ELB { svc := &ELB{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2012-06-01", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/elbv2/service.go b/vendor/github.com/aws/aws-sdk-go/service/elbv2/service.go index ad97e8df885..1fcdb5bf44c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elbv2/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elbv2/service.go @@ -46,11 +46,11 @@ const ( // svc := elbv2.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *ELBV2 { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ELBV2 { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ELBV2 { svc := &ELBV2{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-12-01", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/emr/service.go b/vendor/github.com/aws/aws-sdk-go/service/emr/service.go index 92735a793d4..40af82bfabc 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/emr/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/emr/service.go @@ -46,11 +46,11 @@ const ( // svc := emr.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *EMR { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *EMR { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *EMR { svc := &EMR{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2009-03-31", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/firehose/service.go b/vendor/github.com/aws/aws-sdk-go/service/firehose/service.go index bcdf23dffb9..6e234b3c9e6 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/firehose/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/firehose/service.go @@ -46,11 +46,11 @@ const ( // svc := firehose.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Firehose { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Firehose { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Firehose { svc := &Firehose{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-08-04", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/fms/service.go b/vendor/github.com/aws/aws-sdk-go/service/fms/service.go index 6103e57fd82..e39fa854236 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/fms/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/fms/service.go @@ -46,11 +46,11 @@ const ( // svc := fms.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *FMS { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *FMS { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *FMS { svc := &FMS{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-01-01", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/forecastservice/service.go b/vendor/github.com/aws/aws-sdk-go/service/forecastservice/service.go index 2f1cd5c6fd5..f7b4cda112f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/forecastservice/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/forecastservice/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *ForecastService { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "forecast" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ForecastService { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ForecastService { svc := &ForecastService{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-06-26", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/fsx/service.go b/vendor/github.com/aws/aws-sdk-go/service/fsx/service.go index 544a82e1b07..0afe7ddcb61 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/fsx/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/fsx/service.go @@ -46,11 +46,11 @@ const ( // svc := fsx.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *FSx { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *FSx { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *FSx { svc := &FSx{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-03-01", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/gamelift/api.go b/vendor/github.com/aws/aws-sdk-go/service/gamelift/api.go index 5fffd192979..c29096f3f1b 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/gamelift/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/gamelift/api.go @@ -20304,6 +20304,30 @@ const ( // EC2InstanceTypeC48xlarge is a EC2InstanceType enum value EC2InstanceTypeC48xlarge = "c4.8xlarge" + // EC2InstanceTypeC5Large is a EC2InstanceType enum value + EC2InstanceTypeC5Large = "c5.large" + + // EC2InstanceTypeC5Xlarge is a EC2InstanceType enum value + EC2InstanceTypeC5Xlarge = "c5.xlarge" + + // EC2InstanceTypeC52xlarge is a EC2InstanceType enum value + EC2InstanceTypeC52xlarge = "c5.2xlarge" + + // EC2InstanceTypeC54xlarge is a EC2InstanceType enum value + EC2InstanceTypeC54xlarge = "c5.4xlarge" + + // EC2InstanceTypeC59xlarge is a EC2InstanceType enum value + EC2InstanceTypeC59xlarge = "c5.9xlarge" + + // EC2InstanceTypeC512xlarge is a EC2InstanceType enum value + EC2InstanceTypeC512xlarge = "c5.12xlarge" + + // EC2InstanceTypeC518xlarge is a EC2InstanceType enum value + EC2InstanceTypeC518xlarge = "c5.18xlarge" + + // EC2InstanceTypeC524xlarge is a EC2InstanceType enum value + EC2InstanceTypeC524xlarge = "c5.24xlarge" + // EC2InstanceTypeR3Large is a EC2InstanceType enum value EC2InstanceTypeR3Large = "r3.large" @@ -20337,6 +20361,30 @@ const ( // EC2InstanceTypeR416xlarge is a 
EC2InstanceType enum value EC2InstanceTypeR416xlarge = "r4.16xlarge" + // EC2InstanceTypeR5Large is a EC2InstanceType enum value + EC2InstanceTypeR5Large = "r5.large" + + // EC2InstanceTypeR5Xlarge is a EC2InstanceType enum value + EC2InstanceTypeR5Xlarge = "r5.xlarge" + + // EC2InstanceTypeR52xlarge is a EC2InstanceType enum value + EC2InstanceTypeR52xlarge = "r5.2xlarge" + + // EC2InstanceTypeR54xlarge is a EC2InstanceType enum value + EC2InstanceTypeR54xlarge = "r5.4xlarge" + + // EC2InstanceTypeR58xlarge is a EC2InstanceType enum value + EC2InstanceTypeR58xlarge = "r5.8xlarge" + + // EC2InstanceTypeR512xlarge is a EC2InstanceType enum value + EC2InstanceTypeR512xlarge = "r5.12xlarge" + + // EC2InstanceTypeR516xlarge is a EC2InstanceType enum value + EC2InstanceTypeR516xlarge = "r5.16xlarge" + + // EC2InstanceTypeR524xlarge is a EC2InstanceType enum value + EC2InstanceTypeR524xlarge = "r5.24xlarge" + // EC2InstanceTypeM3Medium is a EC2InstanceType enum value EC2InstanceTypeM3Medium = "m3.medium" @@ -20363,6 +20411,30 @@ const ( // EC2InstanceTypeM410xlarge is a EC2InstanceType enum value EC2InstanceTypeM410xlarge = "m4.10xlarge" + + // EC2InstanceTypeM5Large is a EC2InstanceType enum value + EC2InstanceTypeM5Large = "m5.large" + + // EC2InstanceTypeM5Xlarge is a EC2InstanceType enum value + EC2InstanceTypeM5Xlarge = "m5.xlarge" + + // EC2InstanceTypeM52xlarge is a EC2InstanceType enum value + EC2InstanceTypeM52xlarge = "m5.2xlarge" + + // EC2InstanceTypeM54xlarge is a EC2InstanceType enum value + EC2InstanceTypeM54xlarge = "m5.4xlarge" + + // EC2InstanceTypeM58xlarge is a EC2InstanceType enum value + EC2InstanceTypeM58xlarge = "m5.8xlarge" + + // EC2InstanceTypeM512xlarge is a EC2InstanceType enum value + EC2InstanceTypeM512xlarge = "m5.12xlarge" + + // EC2InstanceTypeM516xlarge is a EC2InstanceType enum value + EC2InstanceTypeM516xlarge = "m5.16xlarge" + + // EC2InstanceTypeM524xlarge is a EC2InstanceType enum value + EC2InstanceTypeM524xlarge = "m5.24xlarge" ) 
const ( @@ -20633,6 +20705,9 @@ const ( // OperatingSystemAmazonLinux is a OperatingSystem enum value OperatingSystemAmazonLinux = "AMAZON_LINUX" + + // OperatingSystemAmazonLinux2 is a OperatingSystem enum value + OperatingSystemAmazonLinux2 = "AMAZON_LINUX_2" ) const ( diff --git a/vendor/github.com/aws/aws-sdk-go/service/gamelift/service.go b/vendor/github.com/aws/aws-sdk-go/service/gamelift/service.go index a2361e47690..2d5f27330a5 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/gamelift/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/gamelift/service.go @@ -46,11 +46,11 @@ const ( // svc := gamelift.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *GameLift { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *GameLift { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *GameLift { svc := &GameLift{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-10-01", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/glacier/service.go b/vendor/github.com/aws/aws-sdk-go/service/glacier/service.go index 85e6e367b20..b8e0cffc6cb 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/glacier/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/glacier/service.go @@ -46,11 +46,11 @@ const ( // svc := glacier.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Glacier { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Glacier { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Glacier { svc := &Glacier{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2012-06-01", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/globalaccelerator/service.go b/vendor/github.com/aws/aws-sdk-go/service/globalaccelerator/service.go index 31552ab8c29..7750c412f61 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/globalaccelerator/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/globalaccelerator/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *GlobalAccelerator { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "globalaccelerator" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *GlobalAccelerator { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *GlobalAccelerator { svc := &GlobalAccelerator{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-08-08", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/glue/service.go b/vendor/github.com/aws/aws-sdk-go/service/glue/service.go index 075b0a1df6c..25770968a17 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/glue/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/glue/service.go @@ -46,11 +46,11 @@ const ( // svc := glue.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Glue { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Glue { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Glue { svc := &Glue{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-03-31", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/guardduty/service.go b/vendor/github.com/aws/aws-sdk-go/service/guardduty/service.go index 1c9835a9161..eec5c772aed 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/guardduty/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/guardduty/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *GuardDuty { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "guardduty" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *GuardDuty { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *GuardDuty { svc := &GuardDuty{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-11-28", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/iam/service.go b/vendor/github.com/aws/aws-sdk-go/service/iam/service.go index 940b4ce3283..62228c482e3 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/iam/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/iam/service.go @@ -46,11 +46,11 @@ const ( // svc := iam.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *IAM { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *IAM { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *IAM { svc := &IAM{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2010-05-08", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/inspector/service.go b/vendor/github.com/aws/aws-sdk-go/service/inspector/service.go index 2e68b4e4d23..aae2fa86571 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/inspector/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/inspector/service.go @@ -46,11 +46,11 @@ const ( // svc := inspector.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Inspector { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Inspector { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Inspector { svc := &Inspector{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2016-02-16", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/iot/service.go b/vendor/github.com/aws/aws-sdk-go/service/iot/service.go index 10a95d5607c..0391dde5474 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/iot/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/iot/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *IoT { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "execute-api" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *IoT { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *IoT { svc := &IoT{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-05-28", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/iotanalytics/service.go b/vendor/github.com/aws/aws-sdk-go/service/iotanalytics/service.go index 54edbd56f34..97b51926b97 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/iotanalytics/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/iotanalytics/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *IoTAnalytics { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "iotanalytics" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *IoTAnalytics { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *IoTAnalytics { svc := &IoTAnalytics{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-11-27", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/iotevents/api.go b/vendor/github.com/aws/aws-sdk-go/service/iotevents/api.go index 551a03fef98..954cf1a17dc 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/iotevents/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/iotevents/api.go @@ -1557,7 +1557,7 @@ type ActionData struct { ClearTimer *ClearTimerAction `locationName:"clearTimer" type:"structure"` // Sends information about the detector model instance and the event which triggered - // the action to a Kinesis Data Firehose stream. + // the action to a Kinesis Data Firehose delivery stream. Firehose *FirehoseAction `locationName:"firehose" type:"structure"` // Sends an IoT Events input, passing in information about the detector model @@ -1567,7 +1567,7 @@ type ActionData struct { // Publishes an MQTT message with the given topic to the AWS IoT message broker. IotTopicPublish *IotTopicPublishAction `locationName:"iotTopicPublish" type:"structure"` - // Calls a Lambda function, passing in information about the detector model + // Calls an AWS Lambda function, passing in information about the detector model // instance and the event which triggered the action. 
Lambda *LambdaAction `locationName:"lambda" type:"structure"` @@ -1584,7 +1584,7 @@ type ActionData struct { Sns *SNSTopicPublishAction `locationName:"sns" type:"structure"` // Sends information about the detector model instance and the event which triggered - // the action to an AWS SQS queue. + // the action to an Amazon SQS queue. Sqs *SqsAction `locationName:"sqs" type:"structure"` } @@ -1829,11 +1829,16 @@ type CreateDetectorModelInput struct { // DetectorModelName is a required field DetectorModelName *string `locationName:"detectorModelName" min:"1" type:"string" required:"true"` - // The input attribute key used to identify a device or system in order to create - // a detector (an instance of the detector model) and then to route each input - // received to the appropriate detector (instance). This parameter uses a JSON-path - // expression to specify the attribute-value pair in the message payload of - // each input that is used to identify the device associated with the input. + // When set to SERIAL, variables are updated and event conditions evaluated + // in the order that the events are defined. When set to BATCH, variables are + // updated and events performed only after all event conditions are evaluated. + EvaluationMethod *string `locationName:"evaluationMethod" type:"string" enum:"EvaluationMethod"` + + // The input attribute key used to identify a device or system to create a detector + // (an instance of the detector model) and then to route each input received + // to the appropriate detector (instance). This parameter uses a JSON-path expression + // to specify the attribute-value pair in the message payload of each input + // that is used to identify the device associated with the input. 
Key *string `locationName:"key" min:"1" type:"string"` // The ARN of the role that grants permission to AWS IoT Events to perform its @@ -1917,6 +1922,12 @@ func (s *CreateDetectorModelInput) SetDetectorModelName(v string) *CreateDetecto return s } +// SetEvaluationMethod sets the EvaluationMethod field's value. +func (s *CreateDetectorModelInput) SetEvaluationMethod(v string) *CreateDetectorModelInput { + s.EvaluationMethod = &v + return s +} + // SetKey sets the Key field's value. func (s *CreateDetectorModelInput) SetKey(v string) *CreateDetectorModelInput { s.Key = &v @@ -2464,11 +2475,16 @@ type DetectorModelConfiguration struct { // The version of the detector model. DetectorModelVersion *string `locationName:"detectorModelVersion" min:"1" type:"string"` - // The input attribute key used to identify a device or system in order to create - // a detector (an instance of the detector model) and then to route each input - // received to the appropriate detector (instance). This parameter uses a JSON-path - // expression to specify the attribute-value pair in the message payload of - // each input that is used to identify the device associated with the input. + // When set to SERIAL, variables are updated and event conditions evaluated + // in the order that the events are defined. When set to BATCH, variables are + // updated and events performed only after all event conditions are evaluated. + EvaluationMethod *string `locationName:"evaluationMethod" type:"string" enum:"EvaluationMethod"` + + // The input attribute key used to identify a device or system to create a detector + // (an instance of the detector model) and then to route each input received + // to the appropriate detector (instance). This parameter uses a JSON-path expression + // to specify the attribute-value pair in the message payload of each input + // that is used to identify the device associated with the input. 
Key *string `locationName:"key" min:"1" type:"string"` // The time the detector model was last updated. @@ -2522,6 +2538,12 @@ func (s *DetectorModelConfiguration) SetDetectorModelVersion(v string) *Detector return s } +// SetEvaluationMethod sets the EvaluationMethod field's value. +func (s *DetectorModelConfiguration) SetEvaluationMethod(v string) *DetectorModelConfiguration { + s.EvaluationMethod = &v + return s +} + // SetKey sets the Key field's value. func (s *DetectorModelConfiguration) SetKey(v string) *DetectorModelConfiguration { s.Key = &v @@ -2673,6 +2695,11 @@ type DetectorModelVersionSummary struct { // The ID of the detector model version. DetectorModelVersion *string `locationName:"detectorModelVersion" min:"1" type:"string"` + // When set to SERIAL, variables are updated and event conditions evaluated + // in the order that the events are defined. When set to BATCH, variables are + // updated and events performed only after all event conditions are evaluated. + EvaluationMethod *string `locationName:"evaluationMethod" type:"string" enum:"EvaluationMethod"` + // The last time the detector model version was updated. LastUpdateTime *time.Time `locationName:"lastUpdateTime" type:"timestamp"` @@ -2718,6 +2745,12 @@ func (s *DetectorModelVersionSummary) SetDetectorModelVersion(v string) *Detecto return s } +// SetEvaluationMethod sets the EvaluationMethod field's value. +func (s *DetectorModelVersionSummary) SetEvaluationMethod(v string) *DetectorModelVersionSummary { + s.EvaluationMethod = &v + return s +} + // SetLastUpdateTime sets the LastUpdateTime field's value. func (s *DetectorModelVersionSummary) SetLastUpdateTime(v time.Time) *DetectorModelVersionSummary { s.LastUpdateTime = &v @@ -2807,18 +2840,18 @@ func (s *Event) SetEventName(v string) *Event { } // Sends information about the detector model instance and the event which triggered -// the action to a Kinesis Data Firehose stream. +// the action to a Kinesis Data Firehose delivery stream. 
type FirehoseAction struct { _ struct{} `type:"structure"` - // The name of the Kinesis Data Firehose stream where the data is written. + // The name of the Kinesis Data Firehose delivery stream where the data is written. // // DeliveryStreamName is a required field DeliveryStreamName *string `locationName:"deliveryStreamName" type:"string" required:"true"` // A character separator that is used to separate records written to the Kinesis - // Data Firehose stream. Valid values are: '\n' (newline), '\t' (tab), '\r\n' - // (Windows newline), ',' (comma). + // Data Firehose delivery stream. Valid values are: '\n' (newline), '\t' (tab), + // '\r\n' (Windows newline), ',' (comma). Separator *string `locationName:"separator" type:"string"` } @@ -3137,12 +3170,12 @@ func (s *IotTopicPublishAction) SetMqttTopic(v string) *IotTopicPublishAction { return s } -// Calls a Lambda function, passing in information about the detector model +// Calls an AWS Lambda function, passing in information about the detector model // instance and the event which triggered the action. type LambdaAction struct { _ struct{} `type:"structure"` - // The ARN of the Lambda function which is executed. + // The ARN of the AWS Lambda function which is executed. // // FunctionArn is a required field FunctionArn *string `locationName:"functionArn" min:"1" type:"string" required:"true"` @@ -4001,11 +4034,11 @@ func (s *SetVariableAction) SetVariableName(v string) *SetVariableAction { } // Sends information about the detector model instance and the event which triggered -// the action to an AWS SQS queue. +// the action to an Amazon SQS queue. type SqsAction struct { _ struct{} `type:"structure"` - // The URL of the SQS queue where the data is written. + // The URL of the Amazon SQS queue where the data is written. 
// // QueueUrl is a required field QueueUrl *string `locationName:"queueUrl" type:"string" required:"true"` @@ -4447,6 +4480,11 @@ type UpdateDetectorModelInput struct { // DetectorModelName is a required field DetectorModelName *string `location:"uri" locationName:"detectorModelName" min:"1" type:"string" required:"true"` + // When set to SERIAL, variables are updated and event conditions evaluated + // in the order that the events are defined. When set to BATCH, variables are + // updated and events performed only after all event conditions are evaluated. + EvaluationMethod *string `locationName:"evaluationMethod" type:"string" enum:"EvaluationMethod"` + // The ARN of the role that grants permission to AWS IoT Events to perform its // operations. // @@ -4512,6 +4550,12 @@ func (s *UpdateDetectorModelInput) SetDetectorModelName(v string) *UpdateDetecto return s } +// SetEvaluationMethod sets the EvaluationMethod field's value. +func (s *UpdateDetectorModelInput) SetEvaluationMethod(v string) *UpdateDetectorModelInput { + s.EvaluationMethod = &v + return s +} + // SetRoleArn sets the RoleArn field's value. 
func (s *UpdateDetectorModelInput) SetRoleArn(v string) *UpdateDetectorModelInput { s.RoleArn = &v @@ -4656,6 +4700,14 @@ const ( DetectorModelVersionStatusFailed = "FAILED" ) +const ( + // EvaluationMethodBatch is a EvaluationMethod enum value + EvaluationMethodBatch = "BATCH" + + // EvaluationMethodSerial is a EvaluationMethod enum value + EvaluationMethodSerial = "SERIAL" +) + const ( // InputStatusCreating is a InputStatus enum value InputStatusCreating = "CREATING" diff --git a/vendor/github.com/aws/aws-sdk-go/service/iotevents/service.go b/vendor/github.com/aws/aws-sdk-go/service/iotevents/service.go index af03d02a01a..13eeb1dfb3c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/iotevents/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/iotevents/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *IoTEvents { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "iotevents" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *IoTEvents { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *IoTEvents { svc := &IoTEvents{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-07-27", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/kafka/api.go b/vendor/github.com/aws/aws-sdk-go/service/kafka/api.go index ce7262a6ac3..f95dd4a89da 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/kafka/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/kafka/api.go @@ -1097,7 +1097,7 @@ func (c *Kafka) ListConfigurationRevisionsRequest(input *ListConfigurationRevisi // ListConfigurationRevisions API operation for Managed Streaming for Kafka. // -// Returns a list of all the MSK configurations in this Region. +// Returns a list of all the revisions of an MSK configuration. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1745,6 +1745,99 @@ func (c *Kafka) UntagResourceWithContext(ctx aws.Context, input *UntagResourceIn return out, req.Send() } +const opUpdateBrokerCount = "UpdateBrokerCount" + +// UpdateBrokerCountRequest generates a "aws/request.Request" representing the +// client's request for the UpdateBrokerCount operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See UpdateBrokerCount for more information on using the UpdateBrokerCount +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateBrokerCountRequest method. +// req, resp := client.UpdateBrokerCountRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/UpdateBrokerCount +func (c *Kafka) UpdateBrokerCountRequest(input *UpdateBrokerCountInput) (req *request.Request, output *UpdateBrokerCountOutput) { + op := &request.Operation{ + Name: opUpdateBrokerCount, + HTTPMethod: "PUT", + HTTPPath: "/v1/clusters/{clusterArn}/nodes/count", + } + + if input == nil { + input = &UpdateBrokerCountInput{} + } + + output = &UpdateBrokerCountOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateBrokerCount API operation for Managed Streaming for Kafka. +// +// Updates the number of broker nodes in the cluster. You can use this operation +// to increase the number of brokers in an existing cluster. You can't decrease +// the number of brokers. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Managed Streaming for Kafka's +// API operation UpdateBrokerCount for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServiceUnavailableException "ServiceUnavailableException" +// Returns information about an error. +// +// * ErrCodeBadRequestException "BadRequestException" +// Returns information about an error. +// +// * ErrCodeUnauthorizedException "UnauthorizedException" +// Returns information about an error. 
+// +// * ErrCodeInternalServerErrorException "InternalServerErrorException" +// Returns information about an error. +// +// * ErrCodeForbiddenException "ForbiddenException" +// Returns information about an error. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/kafka-2018-11-14/UpdateBrokerCount +func (c *Kafka) UpdateBrokerCount(input *UpdateBrokerCountInput) (*UpdateBrokerCountOutput, error) { + req, out := c.UpdateBrokerCountRequest(input) + return out, req.Send() +} + +// UpdateBrokerCountWithContext is the same as UpdateBrokerCount with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateBrokerCount for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Kafka) UpdateBrokerCountWithContext(ctx aws.Context, input *UpdateBrokerCountInput, opts ...request.Option) (*UpdateBrokerCountOutput, error) { + req, out := c.UpdateBrokerCountRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opUpdateBrokerStorage = "UpdateBrokerStorage" // UpdateBrokerStorageRequest generates a "aws/request.Request" representing the @@ -1990,13 +2083,7 @@ func (s *BrokerEBSVolumeInfo) SetVolumeSizeGB(v int64) *BrokerEBSVolumeInfo { type BrokerNodeGroupInfo struct { _ struct{} `type:"structure"` - // The distribution of broker nodes across Availability Zones. This is an optional - // parameter. If you don't specify it, Amazon MSK gives it the value DEFAULT. - // You can also explicitly set this parameter to the value DEFAULT. No other - // values are currently allowed. 
- // - // Amazon MSK distributes the broker nodes evenly across the Availability Zones - // that correspond to the subnets you provide when you create the cluster. + // The distribution of broker nodes across Availability Zones. BrokerAZDistribution *string `locationName:"brokerAZDistribution" type:"string" enum:"BrokerAZDistribution"` // The list of subnets to connect to in the client virtual private cloud (VPC). @@ -2008,8 +2095,8 @@ type BrokerNodeGroupInfo struct { ClientSubnets []*string `locationName:"clientSubnets" type:"list" required:"true"` // The type of Amazon EC2 instances to use for Kafka brokers. The following - // instance types are allowed: kafka.m5.large, kafka.m5.xlarge, kafka.m5.2xlarge,kafka.m5.4xlarge, - // kafka.m5.12xlarge, and kafka.m5.24xlarge. + // instance types are allowed: kafka.m5.large, kafka.m5.xlarge, kafka.m5.2xlarge, + // kafka.m5.4xlarge, kafka.m5.12xlarge, and kafka.m5.24xlarge. // // InstanceType is a required field InstanceType *string `locationName:"instanceType" min:"5" type:"string" required:"true"` @@ -2017,7 +2104,9 @@ type BrokerNodeGroupInfo struct { // The AWS security groups to associate with the elastic network interfaces // in order to specify who can connect to and communicate with the Amazon MSK // cluster. If you don't specify a security group, Amazon MSK uses the default - // security group associated with the VPC. + // security group associated with the VPC. If you specify security groups that + // were shared with you, you must ensure that you have permissions to them. + // Specifically, you need the ec2:DescribeSecurityGroups permission. SecurityGroups []*string `locationName:"securityGroups" type:"list"` // Contains information about storage volumes attached to MSK broker nodes. @@ -2252,7 +2341,9 @@ type ClusterInfo struct { // brokers in the cluster. CurrentBrokerSoftwareInfo *BrokerSoftwareInfo `locationName:"currentBrokerSoftwareInfo" type:"structure"` - // The current version of the MSK cluster. 
+ // The current version of the MSK cluster. Cluster versions aren't simple integers. + // You can obtain the current version by describing the cluster. An example + // version is KTVPDKIKX0DER. CurrentVersion *string `locationName:"currentVersion" type:"string"` // Includes all encryption-related information. @@ -2505,7 +2596,8 @@ type Configuration struct { // LatestRevision is a required field LatestRevision *ConfigurationRevision `locationName:"latestRevision" type:"structure" required:"true"` - // The name of the configuration. + // The name of the configuration. Configuration names are strings that match + // the regex "^[0-9A-Za-z-]+$". // // Name is a required field Name *string `locationName:"name" type:"string" required:"true"` @@ -2673,8 +2765,7 @@ type CreateClusterInput struct { // ClusterName is a required field ClusterName *string `locationName:"clusterName" min:"1" type:"string" required:"true"` - // Represents the configuration that you want MSK to use for the brokers in - // a cluster. + // Represents the configuration that you want MSK to use for the cluster. ConfigurationInfo *ConfigurationInfo `locationName:"configurationInfo" type:"structure"` // Includes all encryption-related information. @@ -2862,7 +2953,8 @@ type CreateConfigurationInput struct { // KafkaVersions is a required field KafkaVersions []*string `locationName:"kafkaVersions" type:"list" required:"true"` - // The name of the configuration. + // The name of the configuration. Configuration names are strings that match + // the regex "^[0-9A-Za-z-]+$". // // Name is a required field Name *string `locationName:"name" type:"string" required:"true"` @@ -2939,7 +3031,8 @@ type CreateConfigurationOutput struct { // Latest revision of the configuration. LatestRevision *ConfigurationRevision `locationName:"latestRevision" type:"structure"` - // The name of the configuration. + // The name of the configuration. Configuration names are strings that match + // the regex "^[0-9A-Za-z-]+$". 
Name *string `locationName:"name" type:"string"` } @@ -3241,7 +3334,8 @@ type DescribeConfigurationOutput struct { // Latest revision of the configuration. LatestRevision *ConfigurationRevision `locationName:"latestRevision" type:"structure"` - // The name of the configuration. + // The name of the configuration. Configuration names are strings that match + // the regex "^[0-9A-Za-z-]+$". Name *string `locationName:"name" type:"string"` } @@ -3485,7 +3579,7 @@ type EncryptionInTransit struct { _ struct{} `type:"structure"` // Indicates the encryption setting for data in transit between clients and - // brokers. The following are the possible values. + // brokers. You must set it to one of the following values. // // TLS means that client-broker communication is enabled with TLS only. // @@ -3495,7 +3589,7 @@ type EncryptionInTransit struct { // PLAINTEXT means that client-broker communication is enabled in plaintext // only. // - // The default value is TLS_PLAINTEXT. + // The default value is TLS. ClientBroker *string `locationName:"clientBroker" type:"string" enum:"ClientBroker"` // When set to true, it indicates that data communication among the broker nodes @@ -3657,7 +3751,9 @@ type GetBootstrapBrokersOutput struct { // A string containing one or more hostname:port pairs. BootstrapBrokerString *string `locationName:"bootstrapBrokerString" type:"string"` - // A string containing one or more DNS names (or IP) and TLS port pairs. + // A string containing one or more DNS names (or IP) and TLS port pairs. The + // following is an example. 
+ // { "BootstrapBrokerStringTls": "b-3.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9094,b-1.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9094,b-2.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9094"} BootstrapBrokerStringTls *string `locationName:"bootstrapBrokerStringTls" type:"string"` } @@ -4497,6 +4593,111 @@ func (s UntagResourceOutput) GoString() string { return s.String() } +// Request body for UpdateBrokerCount. +type UpdateBrokerCountInput struct { + _ struct{} `type:"structure"` + + // ClusterArn is a required field + ClusterArn *string `location:"uri" locationName:"clusterArn" type:"string" required:"true"` + + // The current version of the cluster. + // + // CurrentVersion is a required field + CurrentVersion *string `locationName:"currentVersion" type:"string" required:"true"` + + // The number of broker nodes that you want the cluster to have after this operation + // completes successfully. + // + // TargetNumberOfBrokerNodes is a required field + TargetNumberOfBrokerNodes *int64 `locationName:"targetNumberOfBrokerNodes" min:"1" type:"integer" required:"true"` +} + +// String returns the string representation +func (s UpdateBrokerCountInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateBrokerCountInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateBrokerCountInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateBrokerCountInput"} + if s.ClusterArn == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterArn")) + } + if s.ClusterArn != nil && len(*s.ClusterArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClusterArn", 1)) + } + if s.CurrentVersion == nil { + invalidParams.Add(request.NewErrParamRequired("CurrentVersion")) + } + if s.TargetNumberOfBrokerNodes == nil { + invalidParams.Add(request.NewErrParamRequired("TargetNumberOfBrokerNodes")) + } + if s.TargetNumberOfBrokerNodes != nil && *s.TargetNumberOfBrokerNodes < 1 { + invalidParams.Add(request.NewErrParamMinValue("TargetNumberOfBrokerNodes", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClusterArn sets the ClusterArn field's value. +func (s *UpdateBrokerCountInput) SetClusterArn(v string) *UpdateBrokerCountInput { + s.ClusterArn = &v + return s +} + +// SetCurrentVersion sets the CurrentVersion field's value. +func (s *UpdateBrokerCountInput) SetCurrentVersion(v string) *UpdateBrokerCountInput { + s.CurrentVersion = &v + return s +} + +// SetTargetNumberOfBrokerNodes sets the TargetNumberOfBrokerNodes field's value. +func (s *UpdateBrokerCountInput) SetTargetNumberOfBrokerNodes(v int64) *UpdateBrokerCountInput { + s.TargetNumberOfBrokerNodes = &v + return s +} + +// Response body for UpdateBrokerCount. +type UpdateBrokerCountOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the cluster. + ClusterArn *string `locationName:"clusterArn" type:"string"` + + // The Amazon Resource Name (ARN) of the cluster operation. 
+ ClusterOperationArn *string `locationName:"clusterOperationArn" type:"string"` +} + +// String returns the string representation +func (s UpdateBrokerCountOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateBrokerCountOutput) GoString() string { + return s.String() +} + +// SetClusterArn sets the ClusterArn field's value. +func (s *UpdateBrokerCountOutput) SetClusterArn(v string) *UpdateBrokerCountOutput { + s.ClusterArn = &v + return s +} + +// SetClusterOperationArn sets the ClusterOperationArn field's value. +func (s *UpdateBrokerCountOutput) SetClusterOperationArn(v string) *UpdateBrokerCountOutput { + s.ClusterOperationArn = &v + return s +} + // Request object for UpdateBrokerStorage. type UpdateBrokerStorageInput struct { _ struct{} `type:"structure"` @@ -4513,6 +4714,11 @@ type UpdateBrokerStorageInput struct { // Describes the target volume size and the ID of the broker to apply the update // to. // + // The value you specify for Target-Volume-in-GiB must be a whole number that + // is greater than 100 GiB. + // + // The storage per broker after the update operation can't exceed 16384 GiB. + // // TargetBrokerEBSVolumeInfo is a required field TargetBrokerEBSVolumeInfo []*BrokerEBSVolumeInfo `locationName:"targetBrokerEBSVolumeInfo" type:"list" required:"true"` } @@ -4617,13 +4823,12 @@ type UpdateClusterConfigurationInput struct { // ClusterArn is a required field ClusterArn *string `location:"uri" locationName:"clusterArn" type:"string" required:"true"` - // Represents the configuration that you want MSK to use for the brokers in - // a cluster. + // Represents the configuration that you want MSK to use for the cluster. // // ConfigurationInfo is a required field ConfigurationInfo *ConfigurationInfo `locationName:"configurationInfo" type:"structure" required:"true"` - // The version of the cluster that needs to be updated. + // The version of the cluster that you want to update. 
// // CurrentVersion is a required field CurrentVersion *string `locationName:"currentVersion" type:"string" required:"true"` @@ -4777,13 +4982,10 @@ func (s *ZookeeperNodeInfo) SetZookeeperVersion(v string) *ZookeeperNodeInfo { return s } -// The distribution of broker nodes across Availability Zones. This is an optional -// parameter. If you don't specify it, Amazon MSK gives it the value DEFAULT. -// You can also explicitly set this parameter to the value DEFAULT. No other -// values are currently allowed. -// -// Amazon MSK distributes the broker nodes evenly across the Availability Zones -// that correspond to the subnets you provide when you create the cluster. +// The distribution of broker nodes across Availability Zones. By default, broker +// nodes are distributed among the Availability Zones of your Region. Currently, +// the only supported value is DEFAULT. You can either specify this value explicitly +// or leave it out. const ( // BrokerAZDistributionDefault is a BrokerAZDistribution enum value BrokerAZDistributionDefault = "DEFAULT" diff --git a/vendor/github.com/aws/aws-sdk-go/service/kafka/service.go b/vendor/github.com/aws/aws-sdk-go/service/kafka/service.go index 577c3777e17..9c07694b13a 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/kafka/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/kafka/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *Kafka { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "kafka" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Kafka { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Kafka { svc := &Kafka{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-11-14", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/kinesis/api.go b/vendor/github.com/aws/aws-sdk-go/service/kinesis/api.go index 888f7a4363f..b282cf645b7 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/kinesis/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/kinesis/api.go @@ -7379,6 +7379,8 @@ type SubscribeToShardEventStream struct { // may result in resource leaks. func (es *SubscribeToShardEventStream) Close() (err error) { es.Reader.Close() + es.StreamCloser.Close() + return es.Err() } @@ -7388,8 +7390,6 @@ func (es *SubscribeToShardEventStream) Err() error { if err := es.Reader.Err(); err != nil { return err } - es.StreamCloser.Close() - return nil } diff --git a/vendor/github.com/aws/aws-sdk-go/service/kinesis/service.go b/vendor/github.com/aws/aws-sdk-go/service/kinesis/service.go index 7c3e8c48a28..6c561f1e8eb 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/kinesis/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/kinesis/service.go @@ -46,11 +46,11 @@ const ( // svc := kinesis.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Kinesis { c := p.ClientConfig(EndpointsID, cfgs...) 
- return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Kinesis { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Kinesis { svc := &Kinesis{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2013-12-02", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/kinesisanalytics/service.go b/vendor/github.com/aws/aws-sdk-go/service/kinesisanalytics/service.go index 153daad6eba..d6375e1a205 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/kinesisanalytics/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/kinesisanalytics/service.go @@ -46,11 +46,11 @@ const ( // svc := kinesisanalytics.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *KinesisAnalytics { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *KinesisAnalytics { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *KinesisAnalytics { svc := &KinesisAnalytics{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-08-14", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/kinesisanalyticsv2/service.go b/vendor/github.com/aws/aws-sdk-go/service/kinesisanalyticsv2/service.go index d066c2e6c70..6399b45ae45 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/kinesisanalyticsv2/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/kinesisanalyticsv2/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *KinesisAnalyticsV2 { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "kinesisanalytics" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *KinesisAnalyticsV2 { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *KinesisAnalyticsV2 { svc := &KinesisAnalyticsV2{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-05-23", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/kinesisvideo/service.go b/vendor/github.com/aws/aws-sdk-go/service/kinesisvideo/service.go index a723794cd00..dc21b52f359 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/kinesisvideo/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/kinesisvideo/service.go @@ -46,11 +46,11 @@ const ( // svc := kinesisvideo.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *KinesisVideo { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *KinesisVideo { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *KinesisVideo { svc := &KinesisVideo{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-09-30", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/kms/service.go b/vendor/github.com/aws/aws-sdk-go/service/kms/service.go index 6d062f32fc8..30a7b6875d3 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/kms/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/kms/service.go @@ -46,11 +46,11 @@ const ( // svc := kms.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *KMS { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *KMS { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *KMS { svc := &KMS{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2014-11-01", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/lakeformation/service.go b/vendor/github.com/aws/aws-sdk-go/service/lakeformation/service.go index 495ab0b69bf..a13b061c64d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/lakeformation/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/lakeformation/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *LakeFormation { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "lakeformation" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *LakeFormation { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *LakeFormation { svc := &LakeFormation{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-03-31", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/lambda/service.go b/vendor/github.com/aws/aws-sdk-go/service/lambda/service.go index 1cccdda0842..f137393fc0c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/lambda/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/lambda/service.go @@ -46,11 +46,11 @@ const ( // svc := lambda.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Lambda { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Lambda { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Lambda { svc := &Lambda{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-03-31", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/lexmodelbuildingservice/service.go b/vendor/github.com/aws/aws-sdk-go/service/lexmodelbuildingservice/service.go index 0f9509ff725..98937996781 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/lexmodelbuildingservice/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/lexmodelbuildingservice/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *LexModelBuildingService if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "lex" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *LexModelBuildingService { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *LexModelBuildingService { svc := &LexModelBuildingService{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-04-19", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/licensemanager/service.go b/vendor/github.com/aws/aws-sdk-go/service/licensemanager/service.go index 5a8fc996539..fca9b9c9f47 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/licensemanager/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/licensemanager/service.go @@ -46,11 +46,11 @@ const ( // svc := licensemanager.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *LicenseManager { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *LicenseManager { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *LicenseManager { svc := &LicenseManager{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-08-01", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/lightsail/service.go b/vendor/github.com/aws/aws-sdk-go/service/lightsail/service.go index b9f97faa8a6..092633b75a2 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/lightsail/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/lightsail/service.go @@ -46,11 +46,11 @@ const ( // svc := lightsail.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Lightsail { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Lightsail { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Lightsail { svc := &Lightsail{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2016-11-28", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/macie/service.go b/vendor/github.com/aws/aws-sdk-go/service/macie/service.go index 0b38598f0fe..9e5f20a2699 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/macie/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/macie/service.go @@ -46,11 +46,11 @@ const ( // svc := macie.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Macie { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Macie { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Macie { svc := &Macie{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-12-19", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/managedblockchain/service.go b/vendor/github.com/aws/aws-sdk-go/service/managedblockchain/service.go index d3049e6c546..acd42867a67 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/managedblockchain/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/managedblockchain/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *ManagedBlockchain { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "managedblockchain" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ManagedBlockchain { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ManagedBlockchain { svc := &ManagedBlockchain{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-09-24", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/mediaconnect/service.go b/vendor/github.com/aws/aws-sdk-go/service/mediaconnect/service.go index 218d8e1905a..72aacea0b1c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/mediaconnect/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/mediaconnect/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *MediaConnect { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "mediaconnect" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *MediaConnect { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *MediaConnect { svc := &MediaConnect{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-11-14", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/mediaconvert/service.go b/vendor/github.com/aws/aws-sdk-go/service/mediaconvert/service.go index 43d8865f45f..beada6a05f0 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/mediaconvert/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/mediaconvert/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *MediaConvert { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "mediaconvert" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *MediaConvert { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *MediaConvert { svc := &MediaConvert{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-08-29", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/medialive/service.go b/vendor/github.com/aws/aws-sdk-go/service/medialive/service.go index 621855b02ad..885e54d4a7a 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/medialive/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/medialive/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *MediaLive { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "medialive" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *MediaLive { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *MediaLive { svc := &MediaLive{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-10-14", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/mediapackage/service.go b/vendor/github.com/aws/aws-sdk-go/service/mediapackage/service.go index e08a3e8787c..70659b45953 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/mediapackage/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/mediapackage/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *MediaPackage { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "mediapackage" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *MediaPackage { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *MediaPackage { svc := &MediaPackage{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-10-12", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/mediastore/service.go b/vendor/github.com/aws/aws-sdk-go/service/mediastore/service.go index 492b3bf04c3..0dbef72bfa9 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/mediastore/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/mediastore/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *MediaStore { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "mediastore" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *MediaStore { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *MediaStore { svc := &MediaStore{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-09-01", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/mediastoredata/service.go b/vendor/github.com/aws/aws-sdk-go/service/mediastoredata/service.go index 4203140146d..0fcbc2141b6 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/mediastoredata/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/mediastoredata/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *MediaStoreData { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "mediastore" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *MediaStoreData { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *MediaStoreData { svc := &MediaStoreData{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-09-01", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/mq/service.go b/vendor/github.com/aws/aws-sdk-go/service/mq/service.go index 4baaa385e39..d218f98529e 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/mq/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/mq/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *MQ { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "mq" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *MQ { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *MQ { svc := &MQ{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-11-27", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/neptune/service.go b/vendor/github.com/aws/aws-sdk-go/service/neptune/service.go index 3ddc5e5fba7..d5bb045c0c5 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/neptune/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/neptune/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *Neptune { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "rds" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Neptune { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Neptune { svc := &Neptune{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2014-10-31", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/opsworks/service.go b/vendor/github.com/aws/aws-sdk-go/service/opsworks/service.go index a1a8307cae4..6abe01ab353 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/opsworks/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/opsworks/service.go @@ -46,11 +46,11 @@ const ( // svc := opsworks.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *OpsWorks { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *OpsWorks { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *OpsWorks { svc := &OpsWorks{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2013-02-18", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/organizations/service.go b/vendor/github.com/aws/aws-sdk-go/service/organizations/service.go index 565c1715f3c..37d5a2dcd4a 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/organizations/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/organizations/service.go @@ -46,11 +46,11 @@ const ( // svc := organizations.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Organizations { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Organizations { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Organizations { svc := &Organizations{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2016-11-28", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/personalize/api.go b/vendor/github.com/aws/aws-sdk-go/service/personalize/api.go index bea02eca744..1c330645b39 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/personalize/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/personalize/api.go @@ -5160,6 +5160,18 @@ type CreateSolutionVersionInput struct { // // SolutionArn is a required field SolutionArn *string `locationName:"solutionArn" type:"string" required:"true"` + + // The scope of training to be performed when creating the solution version. + // The FULL option trains the solution version based on the entirety of the + // input solution's training data, while the UPDATE option processes only the + // data that has changed in comparison to the input solution. Choose UPDATE + // when you want to incrementally update your solution version instead of creating + // an entirely new one. + // + // The UPDATE option can only be used when you already have an active solution + // version created from the input solution using the FULL option and the input + // solution was trained with the native-recipe-hrnn-coldstart recipe. 
+ TrainingMode *string `locationName:"trainingMode" type:"string" enum:"TrainingMode"` } // String returns the string representation @@ -5191,6 +5203,12 @@ func (s *CreateSolutionVersionInput) SetSolutionArn(v string) *CreateSolutionVer return s } +// SetTrainingMode sets the TrainingMode field's value. +func (s *CreateSolutionVersionInput) SetTrainingMode(v string) *CreateSolutionVersionInput { + s.TrainingMode = &v + return s +} + type CreateSolutionVersionOutput struct { _ struct{} `type:"structure"` @@ -7492,10 +7510,12 @@ func (s *HPOObjective) SetType(v string) *HPOObjective { type HPOResourceConfig struct { _ struct{} `type:"structure"` - // The maximum number of training jobs. + // The maximum number of training jobs when you create a solution version. The + // maximum value for maxNumberOfTrainingJobs is 40. MaxNumberOfTrainingJobs *string `locationName:"maxNumberOfTrainingJobs" type:"string"` - // The maximum number of parallel training jobs. + // The maximum number of parallel training jobs when you create a solution version. + // The maximum value for maxParallelTrainingJobs is 10. MaxParallelTrainingJobs *string `locationName:"maxParallelTrainingJobs" type:"string"` } @@ -8891,15 +8911,15 @@ type SolutionVersion struct { // the model. EventType *string `locationName:"eventType" type:"string"` - // If training a solution version fails, the reason behind the failure. + // If training a solution version fails, the reason for the failure. FailureReason *string `locationName:"failureReason" type:"string"` // The date and time (in Unix time) that the solution was last updated. LastUpdatedDateTime *time.Time `locationName:"lastUpdatedDateTime" type:"timestamp"` - // When true, Amazon Personalize performs a search for the most optimal recipe - // according to the solution configuration. When false (the default), Amazon - // Personalize uses recipeArn. 
+ // When true, Amazon Personalize searches for the most optimal recipe according + // to the solution configuration. When false (the default), Amazon Personalize + // uses recipeArn. PerformAutoML *bool `locationName:"performAutoML" type:"boolean"` // Whether to perform hyperparameter optimization (HPO) on the chosen recipe. @@ -8922,11 +8942,30 @@ type SolutionVersion struct { // // A solution version can be in one of the following states: // - // * CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED + // * CREATE PENDING + // + // * CREATE IN_PROGRESS + // + // * ACTIVE + // + // * CREATE FAILED Status *string `locationName:"status" type:"string"` - // The time used to train the model. + // The time used to train the model. You are billed for the time it takes to + // train a model. This field is visible only after Amazon Personalize successfully + // trains a model. TrainingHours *float64 `locationName:"trainingHours" type:"double"` + + // The scope of training used to create the solution version. The FULL option + // trains the solution version based on the entirety of the input solution's + // training data, while the UPDATE option processes only the training data that + // has changed since the creation of the last solution version. Choose UPDATE + // when you want to start recommending items added to the dataset without retraining + // the model. + // + // The UPDATE option can only be used after you've created a solution version + // with the FULL option and the training solution uses the native-recipe-hrnn-coldstart. + TrainingMode *string `locationName:"trainingMode" type:"string" enum:"TrainingMode"` } // String returns the string representation @@ -9017,6 +9056,12 @@ func (s *SolutionVersion) SetTrainingHours(v float64) *SolutionVersion { return s } +// SetTrainingMode sets the TrainingMode field's value. 
+func (s *SolutionVersion) SetTrainingMode(v string) *SolutionVersion { + s.TrainingMode = &v + return s +} + // Provides a summary of the properties of a solution version. For a complete // listing, call the DescribeSolutionVersion API. type SolutionVersionSummary struct { @@ -9169,3 +9214,11 @@ const ( // RecipeProviderService is a RecipeProvider enum value RecipeProviderService = "SERVICE" ) + +const ( + // TrainingModeFull is a TrainingMode enum value + TrainingModeFull = "FULL" + + // TrainingModeUpdate is a TrainingMode enum value + TrainingModeUpdate = "UPDATE" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/personalize/service.go b/vendor/github.com/aws/aws-sdk-go/service/personalize/service.go index 4b1ff03e3de..3ec8ede0c9c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/personalize/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/personalize/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *Personalize { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "personalize" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Personalize { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Personalize { svc := &Personalize{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-05-22", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/pinpoint/service.go b/vendor/github.com/aws/aws-sdk-go/service/pinpoint/service.go index 487964c89cc..2a27113746c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/pinpoint/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/pinpoint/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *Pinpoint { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "mobiletargeting" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Pinpoint { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Pinpoint { svc := &Pinpoint{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2016-12-01", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/pricing/service.go b/vendor/github.com/aws/aws-sdk-go/service/pricing/service.go index 90ff33d0a08..90f54acd02f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/pricing/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/pricing/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *Pricing { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "pricing" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Pricing { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Pricing { svc := &Pricing{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-10-15", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/qldb/service.go b/vendor/github.com/aws/aws-sdk-go/service/qldb/service.go index c24c67a5b23..ea80bc02e1f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/qldb/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/qldb/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *QLDB { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "qldb" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *QLDB { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *QLDB { svc := &QLDB{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2019-01-02", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/quicksight/service.go b/vendor/github.com/aws/aws-sdk-go/service/quicksight/service.go index 3ba978ad6a5..28623f680ce 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/quicksight/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/quicksight/service.go @@ -46,11 +46,11 @@ const ( // svc := quicksight.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *QuickSight { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *QuickSight { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *QuickSight { svc := &QuickSight{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-04-01", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/ram/service.go b/vendor/github.com/aws/aws-sdk-go/service/ram/service.go index e1e1dc6683e..cf09e5ce963 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ram/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ram/service.go @@ -46,11 +46,11 @@ const ( // svc := ram.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *RAM { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *RAM { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *RAM { svc := &RAM{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-01-04", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/rds/api.go b/vendor/github.com/aws/aws-sdk-go/service/rds/api.go index bbba1da65fa..7cee4532fc2 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/rds/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/rds/api.go @@ -534,8 +534,8 @@ func (c *RDS) AuthorizeDBSecurityGroupIngressRequest(input *AuthorizeDBSecurityG // The state of the DB security group doesn't allow deletion. // // * ErrCodeAuthorizationAlreadyExistsFault "AuthorizationAlreadyExists" -// The specified CIDRIP or Amazon EC2 security group is already authorized for -// the specified DB security group. +// The specified CIDR IP range or Amazon EC2 security group is already authorized +// for the specified DB security group. // // * ErrCodeAuthorizationQuotaExceededFault "AuthorizationQuotaExceeded" // The DB security group authorization quota has been reached. @@ -1157,6 +1157,98 @@ func (c *RDS) CopyOptionGroupWithContext(ctx aws.Context, input *CopyOptionGroup return out, req.Send() } +const opCreateCustomAvailabilityZone = "CreateCustomAvailabilityZone" + +// CreateCustomAvailabilityZoneRequest generates a "aws/request.Request" representing the +// client's request for the CreateCustomAvailabilityZone operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateCustomAvailabilityZone for more information on using the CreateCustomAvailabilityZone +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateCustomAvailabilityZoneRequest method. +// req, resp := client.CreateCustomAvailabilityZoneRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/CreateCustomAvailabilityZone +func (c *RDS) CreateCustomAvailabilityZoneRequest(input *CreateCustomAvailabilityZoneInput) (req *request.Request, output *CreateCustomAvailabilityZoneOutput) { + op := &request.Operation{ + Name: opCreateCustomAvailabilityZone, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateCustomAvailabilityZoneInput{} + } + + output = &CreateCustomAvailabilityZoneOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateCustomAvailabilityZone API operation for Amazon Relational Database Service. +// +// Creates a custom Availability Zone (AZ). +// +// A custom AZ is an on-premises AZ that is integrated with a VMware vSphere +// cluster. +// +// For more information about RDS on VMware, see the RDS on VMware User Guide. +// (https://docs.aws.amazon.com/AmazonRDS/latest/RDSonVMwareUserGuide/rds-on-vmware.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Relational Database Service's +// API operation CreateCustomAvailabilityZone for usage and error information. +// +// Returned Error Codes: +// * ErrCodeCustomAvailabilityZoneAlreadyExistsFault "CustomAvailabilityZoneAlreadyExists" +// CustomAvailabilityZoneName is already used by an existing custom Availability +// Zone. +// +// * ErrCodeCustomAvailabilityZoneQuotaExceededFault "CustomAvailabilityZoneQuotaExceeded" +// You have exceeded the maximum number of custom Availability Zones. +// +// * ErrCodeKMSKeyNotAccessibleFault "KMSKeyNotAccessibleFault" +// An error occurred accessing an AWS KMS key. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/CreateCustomAvailabilityZone +func (c *RDS) CreateCustomAvailabilityZone(input *CreateCustomAvailabilityZoneInput) (*CreateCustomAvailabilityZoneOutput, error) { + req, out := c.CreateCustomAvailabilityZoneRequest(input) + return out, req.Send() +} + +// CreateCustomAvailabilityZoneWithContext is the same as CreateCustomAvailabilityZone with the addition of +// the ability to pass a context and additional request options. +// +// See CreateCustomAvailabilityZone for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *RDS) CreateCustomAvailabilityZoneWithContext(ctx aws.Context, input *CreateCustomAvailabilityZoneInput, opts ...request.Option) (*CreateCustomAvailabilityZoneOutput, error) { + req, out := c.CreateCustomAvailabilityZoneRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opCreateDBCluster = "CreateDBCluster" // CreateDBClusterRequest generates a "aws/request.Request" representing the @@ -1712,11 +1804,11 @@ func (c *RDS) CreateDBInstanceRequest(input *CreateDBInstanceInput) (req *reques // Storage of the StorageType specified can't be associated with the DB instance. // // * ErrCodeAuthorizationNotFoundFault "AuthorizationNotFound" -// The specified CIDRIP or Amazon EC2 security group isn't authorized for the -// specified DB security group. +// The specified CIDR IP range or Amazon EC2 security group might not be authorized +// for the specified DB security group. // -// RDS also may not be authorized by using IAM to perform necessary actions -// on your behalf. +// Or, RDS might not be authorized to perform necessary actions using IAM on +// your behalf. // // * ErrCodeKMSKeyNotAccessibleFault "KMSKeyNotAccessibleFault" // An error occurred accessing an AWS KMS key. @@ -2572,6 +2664,95 @@ func (c *RDS) CreateOptionGroupWithContext(ctx aws.Context, input *CreateOptionG return out, req.Send() } +const opDeleteCustomAvailabilityZone = "DeleteCustomAvailabilityZone" + +// DeleteCustomAvailabilityZoneRequest generates a "aws/request.Request" representing the +// client's request for the DeleteCustomAvailabilityZone operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteCustomAvailabilityZone for more information on using the DeleteCustomAvailabilityZone +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteCustomAvailabilityZoneRequest method. 
+// req, resp := client.DeleteCustomAvailabilityZoneRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/DeleteCustomAvailabilityZone +func (c *RDS) DeleteCustomAvailabilityZoneRequest(input *DeleteCustomAvailabilityZoneInput) (req *request.Request, output *DeleteCustomAvailabilityZoneOutput) { + op := &request.Operation{ + Name: opDeleteCustomAvailabilityZone, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteCustomAvailabilityZoneInput{} + } + + output = &DeleteCustomAvailabilityZoneOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteCustomAvailabilityZone API operation for Amazon Relational Database Service. +// +// Deletes a custom Availability Zone (AZ). +// +// A custom AZ is an on-premises AZ that is integrated with a VMware vSphere +// cluster. +// +// For more information about RDS on VMware, see the RDS on VMware User Guide. +// (https://docs.aws.amazon.com/AmazonRDS/latest/RDSonVMwareUserGuide/rds-on-vmware.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Relational Database Service's +// API operation DeleteCustomAvailabilityZone for usage and error information. +// +// Returned Error Codes: +// * ErrCodeCustomAvailabilityZoneNotFoundFault "CustomAvailabilityZoneNotFound" +// CustomAvailabilityZoneId doesn't refer to an existing custom Availability +// Zone identifier. +// +// * ErrCodeKMSKeyNotAccessibleFault "KMSKeyNotAccessibleFault" +// An error occurred accessing an AWS KMS key. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/DeleteCustomAvailabilityZone +func (c *RDS) DeleteCustomAvailabilityZone(input *DeleteCustomAvailabilityZoneInput) (*DeleteCustomAvailabilityZoneOutput, error) { + req, out := c.DeleteCustomAvailabilityZoneRequest(input) + return out, req.Send() +} + +// DeleteCustomAvailabilityZoneWithContext is the same as DeleteCustomAvailabilityZone with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteCustomAvailabilityZone for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *RDS) DeleteCustomAvailabilityZoneWithContext(ctx aws.Context, input *DeleteCustomAvailabilityZoneInput, opts ...request.Option) (*DeleteCustomAvailabilityZoneOutput, error) { + req, out := c.DeleteCustomAvailabilityZoneRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteDBCluster = "DeleteDBCluster" // DeleteDBClusterRequest generates a "aws/request.Request" representing the @@ -3654,6 +3835,86 @@ func (c *RDS) DeleteGlobalClusterWithContext(ctx aws.Context, input *DeleteGloba return out, req.Send() } +const opDeleteInstallationMedia = "DeleteInstallationMedia" + +// DeleteInstallationMediaRequest generates a "aws/request.Request" representing the +// client's request for the DeleteInstallationMedia operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See DeleteInstallationMedia for more information on using the DeleteInstallationMedia +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteInstallationMediaRequest method. +// req, resp := client.DeleteInstallationMediaRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/DeleteInstallationMedia +func (c *RDS) DeleteInstallationMediaRequest(input *DeleteInstallationMediaInput) (req *request.Request, output *DeleteInstallationMediaOutput) { + op := &request.Operation{ + Name: opDeleteInstallationMedia, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteInstallationMediaInput{} + } + + output = &DeleteInstallationMediaOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteInstallationMedia API operation for Amazon Relational Database Service. +// +// Deletes the installation media for an on-premises, bring your own media (BYOM) +// DB engine, such as Microsoft SQL Server. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Relational Database Service's +// API operation DeleteInstallationMedia for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInstallationMediaNotFoundFault "InstallationMediaNotFound" +// InstallationMediaID doesn't refer to an existing installation media. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/DeleteInstallationMedia +func (c *RDS) DeleteInstallationMedia(input *DeleteInstallationMediaInput) (*DeleteInstallationMediaOutput, error) { + req, out := c.DeleteInstallationMediaRequest(input) + return out, req.Send() +} + +// DeleteInstallationMediaWithContext is the same as DeleteInstallationMedia with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteInstallationMedia for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *RDS) DeleteInstallationMediaWithContext(ctx aws.Context, input *DeleteInstallationMediaInput, opts ...request.Option) (*DeleteInstallationMediaOutput, error) { + req, out := c.DeleteInstallationMediaRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteOptionGroup = "DeleteOptionGroup" // DeleteOptionGroupRequest generates a "aws/request.Request" representing the @@ -3895,6 +4156,148 @@ func (c *RDS) DescribeCertificatesWithContext(ctx aws.Context, input *DescribeCe return out, req.Send() } +const opDescribeCustomAvailabilityZones = "DescribeCustomAvailabilityZones" + +// DescribeCustomAvailabilityZonesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeCustomAvailabilityZones operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See DescribeCustomAvailabilityZones for more information on using the DescribeCustomAvailabilityZones +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeCustomAvailabilityZonesRequest method. +// req, resp := client.DescribeCustomAvailabilityZonesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/DescribeCustomAvailabilityZones +func (c *RDS) DescribeCustomAvailabilityZonesRequest(input *DescribeCustomAvailabilityZonesInput) (req *request.Request, output *DescribeCustomAvailabilityZonesOutput) { + op := &request.Operation{ + Name: opDescribeCustomAvailabilityZones, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeCustomAvailabilityZonesInput{} + } + + output = &DescribeCustomAvailabilityZonesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeCustomAvailabilityZones API operation for Amazon Relational Database Service. +// +// Returns information about custom Availability Zones (AZs). +// +// A custom AZ is an on-premises AZ that is integrated with a VMware vSphere +// cluster. +// +// For more information about RDS on VMware, see the RDS on VMware User Guide. +// (https://docs.aws.amazon.com/AmazonRDS/latest/RDSonVMwareUserGuide/rds-on-vmware.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Relational Database Service's +// API operation DescribeCustomAvailabilityZones for usage and error information. +// +// Returned Error Codes: +// * ErrCodeCustomAvailabilityZoneNotFoundFault "CustomAvailabilityZoneNotFound" +// CustomAvailabilityZoneId doesn't refer to an existing custom Availability +// Zone identifier. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/DescribeCustomAvailabilityZones +func (c *RDS) DescribeCustomAvailabilityZones(input *DescribeCustomAvailabilityZonesInput) (*DescribeCustomAvailabilityZonesOutput, error) { + req, out := c.DescribeCustomAvailabilityZonesRequest(input) + return out, req.Send() +} + +// DescribeCustomAvailabilityZonesWithContext is the same as DescribeCustomAvailabilityZones with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeCustomAvailabilityZones for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *RDS) DescribeCustomAvailabilityZonesWithContext(ctx aws.Context, input *DescribeCustomAvailabilityZonesInput, opts ...request.Option) (*DescribeCustomAvailabilityZonesOutput, error) { + req, out := c.DescribeCustomAvailabilityZonesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeCustomAvailabilityZonesPages iterates over the pages of a DescribeCustomAvailabilityZones operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeCustomAvailabilityZones method for more information on how to use this operation. 
+// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeCustomAvailabilityZones operation. +// pageNum := 0 +// err := client.DescribeCustomAvailabilityZonesPages(params, +// func(page *rds.DescribeCustomAvailabilityZonesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *RDS) DescribeCustomAvailabilityZonesPages(input *DescribeCustomAvailabilityZonesInput, fn func(*DescribeCustomAvailabilityZonesOutput, bool) bool) error { + return c.DescribeCustomAvailabilityZonesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeCustomAvailabilityZonesPagesWithContext same as DescribeCustomAvailabilityZonesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *RDS) DescribeCustomAvailabilityZonesPagesWithContext(ctx aws.Context, input *DescribeCustomAvailabilityZonesInput, fn func(*DescribeCustomAvailabilityZonesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeCustomAvailabilityZonesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeCustomAvailabilityZonesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*DescribeCustomAvailabilityZonesOutput), !p.HasNextPage()) + } + return p.Err() +} + const opDescribeDBClusterBacktracks = "DescribeDBClusterBacktracks" // DescribeDBClusterBacktracksRequest generates a "aws/request.Request" representing the @@ -6564,6 +6967,142 @@ func (c *RDS) DescribeGlobalClustersPagesWithContext(ctx aws.Context, input *Des return p.Err() } +const opDescribeInstallationMedia = "DescribeInstallationMedia" + +// DescribeInstallationMediaRequest generates a "aws/request.Request" representing the +// client's request for the DescribeInstallationMedia operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeInstallationMedia for more information on using the DescribeInstallationMedia +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeInstallationMediaRequest method. 
+// req, resp := client.DescribeInstallationMediaRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/DescribeInstallationMedia +func (c *RDS) DescribeInstallationMediaRequest(input *DescribeInstallationMediaInput) (req *request.Request, output *DescribeInstallationMediaOutput) { + op := &request.Operation{ + Name: opDescribeInstallationMedia, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxRecords", + TruncationToken: "", + }, + } + + if input == nil { + input = &DescribeInstallationMediaInput{} + } + + output = &DescribeInstallationMediaOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeInstallationMedia API operation for Amazon Relational Database Service. +// +// Describes the available installation media for on-premises, bring your own +// media (BYOM) DB engines, such as Microsoft SQL Server. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Relational Database Service's +// API operation DescribeInstallationMedia for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInstallationMediaNotFoundFault "InstallationMediaNotFound" +// InstallationMediaID doesn't refer to an existing installation media. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/DescribeInstallationMedia +func (c *RDS) DescribeInstallationMedia(input *DescribeInstallationMediaInput) (*DescribeInstallationMediaOutput, error) { + req, out := c.DescribeInstallationMediaRequest(input) + return out, req.Send() +} + +// DescribeInstallationMediaWithContext is the same as DescribeInstallationMedia with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeInstallationMedia for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *RDS) DescribeInstallationMediaWithContext(ctx aws.Context, input *DescribeInstallationMediaInput, opts ...request.Option) (*DescribeInstallationMediaOutput, error) { + req, out := c.DescribeInstallationMediaRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// DescribeInstallationMediaPages iterates over the pages of a DescribeInstallationMedia operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeInstallationMedia method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeInstallationMedia operation. 
+// pageNum := 0 +// err := client.DescribeInstallationMediaPages(params, +// func(page *rds.DescribeInstallationMediaOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *RDS) DescribeInstallationMediaPages(input *DescribeInstallationMediaInput, fn func(*DescribeInstallationMediaOutput, bool) bool) error { + return c.DescribeInstallationMediaPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// DescribeInstallationMediaPagesWithContext same as DescribeInstallationMediaPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *RDS) DescribeInstallationMediaPagesWithContext(ctx aws.Context, input *DescribeInstallationMediaInput, fn func(*DescribeInstallationMediaOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *DescribeInstallationMediaInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeInstallationMediaRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*DescribeInstallationMediaOutput), !p.HasNextPage()) + } + return p.Err() +} + const opDescribeOptionGroupOptions = "DescribeOptionGroupOptions" // DescribeOptionGroupOptionsRequest generates a "aws/request.Request" representing the @@ -7708,6 +8247,90 @@ func (c *RDS) FailoverDBClusterWithContext(ctx aws.Context, input *FailoverDBClu return out, req.Send() } +const opImportInstallationMedia = "ImportInstallationMedia" + +// ImportInstallationMediaRequest generates a "aws/request.Request" representing the +// client's request for the ImportInstallationMedia operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ImportInstallationMedia for more information on using the ImportInstallationMedia +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ImportInstallationMediaRequest method. 
+// req, resp := client.ImportInstallationMediaRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/ImportInstallationMedia +func (c *RDS) ImportInstallationMediaRequest(input *ImportInstallationMediaInput) (req *request.Request, output *ImportInstallationMediaOutput) { + op := &request.Operation{ + Name: opImportInstallationMedia, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ImportInstallationMediaInput{} + } + + output = &ImportInstallationMediaOutput{} + req = c.newRequest(op, input, output) + return +} + +// ImportInstallationMedia API operation for Amazon Relational Database Service. +// +// Imports the installation media for an on-premises, bring your own media (BYOM) +// DB engine, such as SQL Server. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Relational Database Service's +// API operation ImportInstallationMedia for usage and error information. +// +// Returned Error Codes: +// * ErrCodeCustomAvailabilityZoneNotFoundFault "CustomAvailabilityZoneNotFound" +// CustomAvailabilityZoneId doesn't refer to an existing custom Availability +// Zone identifier. +// +// * ErrCodeInstallationMediaAlreadyExistsFault "InstallationMediaAlreadyExists" +// The specified installation media has already been imported. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/rds-2014-10-31/ImportInstallationMedia +func (c *RDS) ImportInstallationMedia(input *ImportInstallationMediaInput) (*ImportInstallationMediaOutput, error) { + req, out := c.ImportInstallationMediaRequest(input) + return out, req.Send() +} + +// ImportInstallationMediaWithContext is the same as ImportInstallationMedia with the addition of +// the ability to pass a context and additional request options. +// +// See ImportInstallationMedia for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *RDS) ImportInstallationMediaWithContext(ctx aws.Context, input *ImportInstallationMediaInput, opts ...request.Option) (*ImportInstallationMediaOutput, error) { + req, out := c.ImportInstallationMediaRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opListTagsForResource = "ListTagsForResource" // ListTagsForResourceRequest generates a "aws/request.Request" representing the @@ -8430,11 +9053,11 @@ func (c *RDS) ModifyDBInstanceRequest(input *ModifyDBInstanceInput) (req *reques // Storage of the StorageType specified can't be associated with the DB instance. // // * ErrCodeAuthorizationNotFoundFault "AuthorizationNotFound" -// The specified CIDRIP or Amazon EC2 security group isn't authorized for the -// specified DB security group. +// The specified CIDR IP range or Amazon EC2 security group might not be authorized +// for the specified DB security group. // -// RDS also may not be authorized by using IAM to perform necessary actions -// on your behalf. +// Or, RDS might not be authorized to perform necessary actions using IAM on +// your behalf. 
// // * ErrCodeCertificateNotFoundFault "CertificateNotFound" // CertificateIdentifier doesn't refer to an existing certificate. @@ -9710,7 +10333,7 @@ func (c *RDS) RemoveRoleFromDBInstanceRequest(input *RemoveRoleFromDBInstanceInp // DBInstanceIdentifier doesn't refer to an existing DB instance. // // * ErrCodeDBInstanceRoleNotFoundFault "DBInstanceRoleNotFound" -// The specified RoleArn value doesn't match the specifed feature for the DB +// The specified RoleArn value doesn't match the specified feature for the DB // instance. // // * ErrCodeInvalidDBInstanceStateFault "InvalidDBInstanceState" @@ -10661,11 +11284,11 @@ func (c *RDS) RestoreDBInstanceFromDBSnapshotRequest(input *RestoreDBInstanceFro // Storage of the StorageType specified can't be associated with the DB instance. // // * ErrCodeAuthorizationNotFoundFault "AuthorizationNotFound" -// The specified CIDRIP or Amazon EC2 security group isn't authorized for the -// specified DB security group. +// The specified CIDR IP range or Amazon EC2 security group might not be authorized +// for the specified DB security group. // -// RDS also may not be authorized by using IAM to perform necessary actions -// on your behalf. +// Or, RDS might not be authorized to perform necessary actions using IAM on +// your behalf. // // * ErrCodeKMSKeyNotAccessibleFault "KMSKeyNotAccessibleFault" // An error occurred accessing an AWS KMS key. @@ -10813,11 +11436,11 @@ func (c *RDS) RestoreDBInstanceFromS3Request(input *RestoreDBInstanceFromS3Input // Storage of the StorageType specified can't be associated with the DB instance. // // * ErrCodeAuthorizationNotFoundFault "AuthorizationNotFound" -// The specified CIDRIP or Amazon EC2 security group isn't authorized for the -// specified DB security group. +// The specified CIDR IP range or Amazon EC2 security group might not be authorized +// for the specified DB security group. // -// RDS also may not be authorized by using IAM to perform necessary actions -// on your behalf. 
+// Or, RDS might not be authorized to perform necessary actions using IAM on +// your behalf. // // * ErrCodeKMSKeyNotAccessibleFault "KMSKeyNotAccessibleFault" // An error occurred accessing an AWS KMS key. @@ -10966,11 +11589,11 @@ func (c *RDS) RestoreDBInstanceToPointInTimeRequest(input *RestoreDBInstanceToPo // Storage of the StorageType specified can't be associated with the DB instance. // // * ErrCodeAuthorizationNotFoundFault "AuthorizationNotFound" -// The specified CIDRIP or Amazon EC2 security group isn't authorized for the -// specified DB security group. +// The specified CIDR IP range or Amazon EC2 security group might not be authorized +// for the specified DB security group. // -// RDS also may not be authorized by using IAM to perform necessary actions -// on your behalf. +// Or, RDS might not be authorized to perform necessary actions using IAM on +// your behalf. // // * ErrCodeKMSKeyNotAccessibleFault "KMSKeyNotAccessibleFault" // An error occurred accessing an AWS KMS key. @@ -11072,11 +11695,11 @@ func (c *RDS) RevokeDBSecurityGroupIngressRequest(input *RevokeDBSecurityGroupIn // DBSecurityGroupName doesn't refer to an existing DB security group. // // * ErrCodeAuthorizationNotFoundFault "AuthorizationNotFound" -// The specified CIDRIP or Amazon EC2 security group isn't authorized for the -// specified DB security group. +// The specified CIDR IP range or Amazon EC2 security group might not be authorized +// for the specified DB security group. // -// RDS also may not be authorized by using IAM to perform necessary actions -// on your behalf. +// Or, RDS might not be authorized to perform necessary actions using IAM on +// your behalf. // // * ErrCodeInvalidDBSecurityGroupStateFault "InvalidDBSecurityGroupState" // The state of the DB security group doesn't allow deletion. @@ -11384,11 +12007,11 @@ func (c *RDS) StartDBInstanceRequest(input *StartDBInstanceInput) (req *request. // DBClusterIdentifier doesn't refer to an existing DB cluster. 
// // * ErrCodeAuthorizationNotFoundFault "AuthorizationNotFound" -// The specified CIDRIP or Amazon EC2 security group isn't authorized for the -// specified DB security group. +// The specified CIDR IP range or Amazon EC2 security group might not be authorized +// for the specified DB security group. // -// RDS also may not be authorized by using IAM to perform necessary actions -// on your behalf. +// Or, RDS might not be authorized to perform necessary actions using IAM on +// your behalf. // // * ErrCodeKMSKeyNotAccessibleFault "KMSKeyNotAccessibleFault" // An error occurred accessing an AWS KMS key. @@ -13572,6 +14195,105 @@ func (s *CopyOptionGroupOutput) SetOptionGroup(v *OptionGroup) *CopyOptionGroupO return s } +type CreateCustomAvailabilityZoneInput struct { + _ struct{} `type:"structure"` + + // The name of the custom Availability Zone (AZ). + // + // CustomAvailabilityZoneName is a required field + CustomAvailabilityZoneName *string `type:"string" required:"true"` + + // The ID of an existing virtual private network (VPN) between the Amazon RDS + // website and the VMware vSphere cluster. + ExistingVpnId *string `type:"string"` + + // The name of a new VPN tunnel between the Amazon RDS website and the VMware + // vSphere cluster. + // + // Specify this parameter only if ExistingVpnId is not specified. + NewVpnTunnelName *string `type:"string"` + + // The IP address of network traffic from your on-premises data center. A custom + // AZ receives the network traffic. + // + // Specify this parameter only if ExistingVpnId is not specified. + VpnTunnelOriginatorIP *string `type:"string"` +} + +// String returns the string representation +func (s CreateCustomAvailabilityZoneInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCustomAvailabilityZoneInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateCustomAvailabilityZoneInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateCustomAvailabilityZoneInput"} + if s.CustomAvailabilityZoneName == nil { + invalidParams.Add(request.NewErrParamRequired("CustomAvailabilityZoneName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCustomAvailabilityZoneName sets the CustomAvailabilityZoneName field's value. +func (s *CreateCustomAvailabilityZoneInput) SetCustomAvailabilityZoneName(v string) *CreateCustomAvailabilityZoneInput { + s.CustomAvailabilityZoneName = &v + return s +} + +// SetExistingVpnId sets the ExistingVpnId field's value. +func (s *CreateCustomAvailabilityZoneInput) SetExistingVpnId(v string) *CreateCustomAvailabilityZoneInput { + s.ExistingVpnId = &v + return s +} + +// SetNewVpnTunnelName sets the NewVpnTunnelName field's value. +func (s *CreateCustomAvailabilityZoneInput) SetNewVpnTunnelName(v string) *CreateCustomAvailabilityZoneInput { + s.NewVpnTunnelName = &v + return s +} + +// SetVpnTunnelOriginatorIP sets the VpnTunnelOriginatorIP field's value. +func (s *CreateCustomAvailabilityZoneInput) SetVpnTunnelOriginatorIP(v string) *CreateCustomAvailabilityZoneInput { + s.VpnTunnelOriginatorIP = &v + return s +} + +type CreateCustomAvailabilityZoneOutput struct { + _ struct{} `type:"structure"` + + // A custom Availability Zone (AZ) is an on-premises AZ that is integrated with + // a VMware vSphere cluster. + // + // For more information about RDS on VMware, see the RDS on VMware User Guide. 
+ // (https://docs.aws.amazon.com/AmazonRDS/latest/RDSonVMwareUserGuide/rds-on-vmware.html) + CustomAvailabilityZone *CustomAvailabilityZone `type:"structure"` +} + +// String returns the string representation +func (s CreateCustomAvailabilityZoneOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateCustomAvailabilityZoneOutput) GoString() string { + return s.String() +} + +// SetCustomAvailabilityZone sets the CustomAvailabilityZone field's value. +func (s *CreateCustomAvailabilityZoneOutput) SetCustomAvailabilityZone(v *CustomAvailabilityZone) *CreateCustomAvailabilityZoneOutput { + s.CustomAvailabilityZone = v + return s +} + type CreateDBClusterEndpointInput struct { _ struct{} `type:"structure"` @@ -14628,6 +15350,13 @@ type CreateDBInstanceInput struct { // Constraint: The AvailabilityZone parameter can't be specified if the DB instance // is a Multi-AZ deployment. The specified Availability Zone must be in the // same AWS Region as the current endpoint. + // + // If you're creating a DB instance in an RDS on VMware environment, specify + // the identifier of the custom Availability Zone to create the DB instance + // in. + // + // For more information about RDS on VMware, see the RDS on VMware User Guide. + // (https://docs.aws.amazon.com/AmazonRDS/latest/RDSonVMwareUserGuide/rds-on-vmware.html) AvailabilityZone *string `type:"string"` // The number of days for which automated backups are retained. Setting this @@ -16968,6 +17697,64 @@ func (s *CreateOptionGroupOutput) SetOptionGroup(v *OptionGroup) *CreateOptionGr return s } +// A custom Availability Zone (AZ) is an on-premises AZ that is integrated with +// a VMware vSphere cluster. +// +// For more information about RDS on VMware, see the RDS on VMware User Guide. 
+// (https://docs.aws.amazon.com/AmazonRDS/latest/RDSonVMwareUserGuide/rds-on-vmware.html) +type CustomAvailabilityZone struct { + _ struct{} `type:"structure"` + + // The identifier of the custom AZ. + // + // Amazon RDS generates a unique identifier when a custom AZ is created. + CustomAvailabilityZoneId *string `type:"string"` + + // The name of the custom AZ. + CustomAvailabilityZoneName *string `type:"string"` + + // The status of the custom AZ. + CustomAvailabilityZoneStatus *string `type:"string"` + + // Information about the virtual private network (VPN) between the VMware vSphere + // cluster and the AWS website. + VpnDetails *VpnDetails `type:"structure"` +} + +// String returns the string representation +func (s CustomAvailabilityZone) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CustomAvailabilityZone) GoString() string { + return s.String() +} + +// SetCustomAvailabilityZoneId sets the CustomAvailabilityZoneId field's value. +func (s *CustomAvailabilityZone) SetCustomAvailabilityZoneId(v string) *CustomAvailabilityZone { + s.CustomAvailabilityZoneId = &v + return s +} + +// SetCustomAvailabilityZoneName sets the CustomAvailabilityZoneName field's value. +func (s *CustomAvailabilityZone) SetCustomAvailabilityZoneName(v string) *CustomAvailabilityZone { + s.CustomAvailabilityZoneName = &v + return s +} + +// SetCustomAvailabilityZoneStatus sets the CustomAvailabilityZoneStatus field's value. +func (s *CustomAvailabilityZone) SetCustomAvailabilityZoneStatus(v string) *CustomAvailabilityZone { + s.CustomAvailabilityZoneStatus = &v + return s +} + +// SetVpnDetails sets the VpnDetails field's value. +func (s *CustomAvailabilityZone) SetVpnDetails(v *VpnDetails) *CustomAvailabilityZone { + s.VpnDetails = v + return s +} + // Contains the details of an Amazon Aurora DB cluster. 
// // This data type is used as a response element in the DescribeDBClusters, StopDBCluster, @@ -19980,6 +20767,71 @@ func (s *DBSubnetGroup) SetVpcId(v string) *DBSubnetGroup { return s } +type DeleteCustomAvailabilityZoneInput struct { + _ struct{} `type:"structure"` + + // The custom AZ identifier. + // + // CustomAvailabilityZoneId is a required field + CustomAvailabilityZoneId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteCustomAvailabilityZoneInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCustomAvailabilityZoneInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteCustomAvailabilityZoneInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteCustomAvailabilityZoneInput"} + if s.CustomAvailabilityZoneId == nil { + invalidParams.Add(request.NewErrParamRequired("CustomAvailabilityZoneId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCustomAvailabilityZoneId sets the CustomAvailabilityZoneId field's value. +func (s *DeleteCustomAvailabilityZoneInput) SetCustomAvailabilityZoneId(v string) *DeleteCustomAvailabilityZoneInput { + s.CustomAvailabilityZoneId = &v + return s +} + +type DeleteCustomAvailabilityZoneOutput struct { + _ struct{} `type:"structure"` + + // A custom Availability Zone (AZ) is an on-premises AZ that is integrated with + // a VMware vSphere cluster. + // + // For more information about RDS on VMware, see the RDS on VMware User Guide. 
+ // (https://docs.aws.amazon.com/AmazonRDS/latest/RDSonVMwareUserGuide/rds-on-vmware.html) + CustomAvailabilityZone *CustomAvailabilityZone `type:"structure"` +} + +// String returns the string representation +func (s DeleteCustomAvailabilityZoneOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteCustomAvailabilityZoneOutput) GoString() string { + return s.String() +} + +// SetCustomAvailabilityZone sets the CustomAvailabilityZone field's value. +func (s *DeleteCustomAvailabilityZoneOutput) SetCustomAvailabilityZone(v *CustomAvailabilityZone) *DeleteCustomAvailabilityZoneOutput { + s.CustomAvailabilityZone = v + return s +} + type DeleteDBClusterEndpointInput struct { _ struct{} `type:"structure"` @@ -20939,6 +21791,133 @@ func (s *DeleteGlobalClusterOutput) SetGlobalCluster(v *GlobalCluster) *DeleteGl return s } +type DeleteInstallationMediaInput struct { + _ struct{} `type:"structure"` + + // The installation media ID. + // + // InstallationMediaId is a required field + InstallationMediaId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteInstallationMediaInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteInstallationMediaInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteInstallationMediaInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteInstallationMediaInput"} + if s.InstallationMediaId == nil { + invalidParams.Add(request.NewErrParamRequired("InstallationMediaId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInstallationMediaId sets the InstallationMediaId field's value. 
+func (s *DeleteInstallationMediaInput) SetInstallationMediaId(v string) *DeleteInstallationMediaInput { + s.InstallationMediaId = &v + return s +} + +// Contains the installation media for on-premises, bring your own media (BYOM) +// DB engines, such as Microsoft SQL Server. +type DeleteInstallationMediaOutput struct { + _ struct{} `type:"structure"` + + // The custom Availability Zone (AZ) that contains the installation media. + CustomAvailabilityZoneId *string `type:"string"` + + // The DB engine. + Engine *string `type:"string"` + + // The path to the installation media for the DB engine. + EngineInstallationMediaPath *string `type:"string"` + + // The engine version of the DB engine. + EngineVersion *string `type:"string"` + + // If an installation media failure occurred, the cause of the failure. + FailureCause *InstallationMediaFailureCause `type:"structure"` + + // The installation media ID. + InstallationMediaId *string `type:"string"` + + // The path to the installation media for the operating system associated with + // the DB engine. + OSInstallationMediaPath *string `type:"string"` + + // The status of the installation media. + Status *string `type:"string"` +} + +// String returns the string representation +func (s DeleteInstallationMediaOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteInstallationMediaOutput) GoString() string { + return s.String() +} + +// SetCustomAvailabilityZoneId sets the CustomAvailabilityZoneId field's value. +func (s *DeleteInstallationMediaOutput) SetCustomAvailabilityZoneId(v string) *DeleteInstallationMediaOutput { + s.CustomAvailabilityZoneId = &v + return s +} + +// SetEngine sets the Engine field's value. +func (s *DeleteInstallationMediaOutput) SetEngine(v string) *DeleteInstallationMediaOutput { + s.Engine = &v + return s +} + +// SetEngineInstallationMediaPath sets the EngineInstallationMediaPath field's value. 
+func (s *DeleteInstallationMediaOutput) SetEngineInstallationMediaPath(v string) *DeleteInstallationMediaOutput { + s.EngineInstallationMediaPath = &v + return s +} + +// SetEngineVersion sets the EngineVersion field's value. +func (s *DeleteInstallationMediaOutput) SetEngineVersion(v string) *DeleteInstallationMediaOutput { + s.EngineVersion = &v + return s +} + +// SetFailureCause sets the FailureCause field's value. +func (s *DeleteInstallationMediaOutput) SetFailureCause(v *InstallationMediaFailureCause) *DeleteInstallationMediaOutput { + s.FailureCause = v + return s +} + +// SetInstallationMediaId sets the InstallationMediaId field's value. +func (s *DeleteInstallationMediaOutput) SetInstallationMediaId(v string) *DeleteInstallationMediaOutput { + s.InstallationMediaId = &v + return s +} + +// SetOSInstallationMediaPath sets the OSInstallationMediaPath field's value. +func (s *DeleteInstallationMediaOutput) SetOSInstallationMediaPath(v string) *DeleteInstallationMediaOutput { + s.OSInstallationMediaPath = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *DeleteInstallationMediaOutput) SetStatus(v string) *DeleteInstallationMediaOutput { + s.Status = &v + return s +} + type DeleteOptionGroupInput struct { _ struct{} `type:"structure"` @@ -21054,7 +22033,7 @@ type DescribeCertificatesInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -21151,6 +22130,119 @@ func (s *DescribeCertificatesOutput) SetMarker(v string) *DescribeCertificatesOu return s } +type DescribeCustomAvailabilityZonesInput struct { + _ struct{} `type:"structure"` + + // The custom AZ identifier. 
If this parameter is specified, information from + // only the specific custom AZ is returned. + CustomAvailabilityZoneId *string `type:"string"` + + // A filter that specifies one or more custom AZs to describe. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // An optional pagination token provided by a previous DescribeCustomAvailabilityZones + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` + + // The maximum number of records to include in the response. If more records + // exist than the specified MaxRecords value, a pagination token called a marker + // is included in the response so that you can retrieve the remaining results. + // + // Default: 100 + // + // Constraints: Minimum 20, maximum 100. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeCustomAvailabilityZonesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCustomAvailabilityZonesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeCustomAvailabilityZonesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeCustomAvailabilityZonesInput"} + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCustomAvailabilityZoneId sets the CustomAvailabilityZoneId field's value. 
+func (s *DescribeCustomAvailabilityZonesInput) SetCustomAvailabilityZoneId(v string) *DescribeCustomAvailabilityZonesInput { + s.CustomAvailabilityZoneId = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *DescribeCustomAvailabilityZonesInput) SetFilters(v []*Filter) *DescribeCustomAvailabilityZonesInput { + s.Filters = v + return s +} + +// SetMarker sets the Marker field's value. +func (s *DescribeCustomAvailabilityZonesInput) SetMarker(v string) *DescribeCustomAvailabilityZonesInput { + s.Marker = &v + return s +} + +// SetMaxRecords sets the MaxRecords field's value. +func (s *DescribeCustomAvailabilityZonesInput) SetMaxRecords(v int64) *DescribeCustomAvailabilityZonesInput { + s.MaxRecords = &v + return s +} + +type DescribeCustomAvailabilityZonesOutput struct { + _ struct{} `type:"structure"` + + // The list of CustomAvailabilityZone objects for the AWS account. + CustomAvailabilityZones []*CustomAvailabilityZone `locationNameList:"CustomAvailabilityZone" type:"list"` + + // An optional pagination token provided by a previous DescribeCustomAvailabilityZones + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeCustomAvailabilityZonesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeCustomAvailabilityZonesOutput) GoString() string { + return s.String() +} + +// SetCustomAvailabilityZones sets the CustomAvailabilityZones field's value. +func (s *DescribeCustomAvailabilityZonesOutput) SetCustomAvailabilityZones(v []*CustomAvailabilityZone) *DescribeCustomAvailabilityZonesOutput { + s.CustomAvailabilityZones = v + return s +} + +// SetMarker sets the Marker field's value. 
+func (s *DescribeCustomAvailabilityZonesOutput) SetMarker(v string) *DescribeCustomAvailabilityZonesOutput { + s.Marker = &v + return s +} + type DescribeDBClusterBacktracksInput struct { _ struct{} `type:"structure"` @@ -21201,7 +22293,7 @@ type DescribeDBClusterBacktracksInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -21334,7 +22426,7 @@ type DescribeDBClusterEndpointsInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -21457,7 +22549,7 @@ type DescribeDBClusterParameterGroupsInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -21576,7 +22668,7 @@ type DescribeDBClusterParametersInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. 
// // Default: 100 // @@ -21814,7 +22906,7 @@ type DescribeDBClusterSnapshotsInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -21994,7 +23086,7 @@ type DescribeDBClustersInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -22148,7 +23240,7 @@ type DescribeDBEngineVersionsInput struct { // The maximum number of records to include in the response. If more than the // MaxRecords value is available, a pagination token called a marker is included - // in the response so that the following results can be retrieved. + // in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -22322,7 +23414,7 @@ type DescribeDBInstanceAutomatedBackupsInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. MaxRecords *int64 `type:"integer"` } @@ -22465,7 +23557,7 @@ type DescribeDBInstancesInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. 
+ // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -22639,7 +23731,7 @@ type DescribeDBLogFilesInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. MaxRecords *int64 `type:"integer"` } @@ -22771,7 +23863,7 @@ type DescribeDBParameterGroupsInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -22891,7 +23983,7 @@ type DescribeDBParametersInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -23021,7 +24113,7 @@ type DescribeDBSecurityGroupsInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -23251,7 +24343,7 @@ type DescribeDBSnapshotsInput struct { // The maximum number of records to include in the response. 
If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -23425,7 +24517,7 @@ type DescribeDBSubnetGroupsInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -23542,7 +24634,7 @@ type DescribeEngineDefaultClusterParametersInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -23649,7 +24741,7 @@ type DescribeEngineDefaultParametersInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -23829,7 +24921,7 @@ type DescribeEventSubscriptionsInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. 
// // Default: 100 // @@ -23958,7 +25050,7 @@ type DescribeEventsInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -24145,7 +25237,7 @@ type DescribeGlobalClustersInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -24241,6 +25333,123 @@ func (s *DescribeGlobalClustersOutput) SetMarker(v string) *DescribeGlobalCluste return s } +type DescribeInstallationMediaInput struct { + _ struct{} `type:"structure"` + + // A filter that specifies one or more installation media to describe. Supported + // filters include the following: + // + // * custom-availability-zone-id - Accepts custom Availability Zone (AZ) + // identifiers. The results list includes information about only the custom + // AZs identified by these identifiers. + // + // * engine - Accepts database engines. The results list includes information + // about only the database engines identified by these identifiers. For more + // information about the valid engines for installation media, see ImportInstallationMedia. + Filters []*Filter `locationNameList:"Filter" type:"list"` + + // The installation media ID. + InstallationMediaId *string `type:"string"` + + // An optional pagination token provided by a previous request. If this parameter + // is specified, the response includes only records beyond the marker, up to + // the value specified by MaxRecords. 
+ Marker *string `type:"string"` + + // An optional pagination token provided by a previous DescribeInstallationMedia + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords. + MaxRecords *int64 `type:"integer"` +} + +// String returns the string representation +func (s DescribeInstallationMediaInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstallationMediaInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeInstallationMediaInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeInstallationMediaInput"} + if s.Filters != nil { + for i, v := range s.Filters { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Filters", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilters sets the Filters field's value. +func (s *DescribeInstallationMediaInput) SetFilters(v []*Filter) *DescribeInstallationMediaInput { + s.Filters = v + return s +} + +// SetInstallationMediaId sets the InstallationMediaId field's value. +func (s *DescribeInstallationMediaInput) SetInstallationMediaId(v string) *DescribeInstallationMediaInput { + s.InstallationMediaId = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *DescribeInstallationMediaInput) SetMarker(v string) *DescribeInstallationMediaInput { + s.Marker = &v + return s +} + +// SetMaxRecords sets the MaxRecords field's value. 
+func (s *DescribeInstallationMediaInput) SetMaxRecords(v int64) *DescribeInstallationMediaInput { + s.MaxRecords = &v + return s +} + +type DescribeInstallationMediaOutput struct { + _ struct{} `type:"structure"` + + // The list of InstallationMedia objects for the AWS account. + InstallationMedia []*InstallationMedia `locationNameList:"InstallationMedia" type:"list"` + + // An optional pagination token provided by a previous DescribeInstallationMedia + // request. If this parameter is specified, the response includes only records + // beyond the marker, up to the value specified by MaxRecords. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s DescribeInstallationMediaOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeInstallationMediaOutput) GoString() string { + return s.String() +} + +// SetInstallationMedia sets the InstallationMedia field's value. +func (s *DescribeInstallationMediaOutput) SetInstallationMedia(v []*InstallationMedia) *DescribeInstallationMediaOutput { + s.InstallationMedia = v + return s +} + +// SetMarker sets the Marker field's value. +func (s *DescribeInstallationMediaOutput) SetMarker(v string) *DescribeInstallationMediaOutput { + s.Marker = &v + return s +} + type DescribeOptionGroupOptionsInput struct { _ struct{} `type:"structure"` @@ -24263,7 +25472,7 @@ type DescribeOptionGroupOptionsInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -24390,7 +25599,7 @@ type DescribeOptionGroupsInput struct { // The maximum number of records to include in the response. 
If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -24533,7 +25742,7 @@ type DescribeOrderableDBInstanceOptionsInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -24686,7 +25895,7 @@ type DescribePendingMaintenanceActionsInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -24816,7 +26025,7 @@ type DescribeReservedDBInstancesInput struct { // The maximum number of records to include in the response. If more than the // MaxRecords value is available, a pagination token called a marker is included - // in the response so that the following results can be retrieved. + // in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -24965,7 +26174,7 @@ type DescribeReservedDBInstancesOfferingsInput struct { // The maximum number of records to include in the response. If more than the // MaxRecords value is available, a pagination token called a marker is included - // in the response so that the following results can be retrieved. + // in the response so that you can retrieve the reamaining results. 
// // Default: 100 // @@ -25164,7 +26373,7 @@ type DescribeSourceRegionsInput struct { // The maximum number of records to include in the response. If more records // exist than the specified MaxRecords value, a pagination token called a marker - // is included in the response so that the remaining results can be retrieved. + // is included in the response so that you can retrieve the remaining results. // // Default: 100 // @@ -26276,6 +27485,334 @@ func (s *IPRange) SetStatus(v string) *IPRange { return s } +type ImportInstallationMediaInput struct { + _ struct{} `type:"structure"` + + // The identifier of the custom Availability Zone (AZ) to import the installation + // media to. + // + // CustomAvailabilityZoneId is a required field + CustomAvailabilityZoneId *string `type:"string" required:"true"` + + // The name of the database engine to be used for this instance. + // + // The list only includes supported on-premises, bring your own media (BYOM) + // DB engines. + // + // Valid Values: + // + // * sqlserver-ee + // + // * sqlserver-se + // + // * sqlserver-ex + // + // * sqlserver-web + // + // Engine is a required field + Engine *string `type:"string" required:"true"` + + // The path to the installation media for the specified DB engine. + // + // Example: SQLServerISO/en_sql_server_2016_enterprise_x64_dvd_8701793.iso + // + // EngineInstallationMediaPath is a required field + EngineInstallationMediaPath *string `type:"string" required:"true"` + + // The version number of the database engine to use. + // + // For a list of valid engine versions, call DescribeDBEngineVersions. + // + // The following are the database engines and links to information about the + // major and minor versions. The list only includes supported on-premises, bring + // your own media (BYOM) DB engines. 
+ // + // Microsoft SQL Server + // + // See Version and Feature Support on Amazon RDS (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_SQLServer.html#SQLServer.Concepts.General.FeatureSupport) + // in the Amazon RDS User Guide. + // + // EngineVersion is a required field + EngineVersion *string `type:"string" required:"true"` + + // The path to the installation media for the operating system associated with + // the specified DB engine. + // + // Example: WindowsISO/en_windows_server_2016_x64_dvd_9327751.iso + // + // OSInstallationMediaPath is a required field + OSInstallationMediaPath *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ImportInstallationMediaInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportInstallationMediaInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ImportInstallationMediaInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ImportInstallationMediaInput"} + if s.CustomAvailabilityZoneId == nil { + invalidParams.Add(request.NewErrParamRequired("CustomAvailabilityZoneId")) + } + if s.Engine == nil { + invalidParams.Add(request.NewErrParamRequired("Engine")) + } + if s.EngineInstallationMediaPath == nil { + invalidParams.Add(request.NewErrParamRequired("EngineInstallationMediaPath")) + } + if s.EngineVersion == nil { + invalidParams.Add(request.NewErrParamRequired("EngineVersion")) + } + if s.OSInstallationMediaPath == nil { + invalidParams.Add(request.NewErrParamRequired("OSInstallationMediaPath")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCustomAvailabilityZoneId sets the CustomAvailabilityZoneId field's value. 
+func (s *ImportInstallationMediaInput) SetCustomAvailabilityZoneId(v string) *ImportInstallationMediaInput { + s.CustomAvailabilityZoneId = &v + return s +} + +// SetEngine sets the Engine field's value. +func (s *ImportInstallationMediaInput) SetEngine(v string) *ImportInstallationMediaInput { + s.Engine = &v + return s +} + +// SetEngineInstallationMediaPath sets the EngineInstallationMediaPath field's value. +func (s *ImportInstallationMediaInput) SetEngineInstallationMediaPath(v string) *ImportInstallationMediaInput { + s.EngineInstallationMediaPath = &v + return s +} + +// SetEngineVersion sets the EngineVersion field's value. +func (s *ImportInstallationMediaInput) SetEngineVersion(v string) *ImportInstallationMediaInput { + s.EngineVersion = &v + return s +} + +// SetOSInstallationMediaPath sets the OSInstallationMediaPath field's value. +func (s *ImportInstallationMediaInput) SetOSInstallationMediaPath(v string) *ImportInstallationMediaInput { + s.OSInstallationMediaPath = &v + return s +} + +// Contains the installation media for on-premises, bring your own media (BYOM) +// DB engines, such as Microsoft SQL Server. +type ImportInstallationMediaOutput struct { + _ struct{} `type:"structure"` + + // The custom Availability Zone (AZ) that contains the installation media. + CustomAvailabilityZoneId *string `type:"string"` + + // The DB engine. + Engine *string `type:"string"` + + // The path to the installation media for the DB engine. + EngineInstallationMediaPath *string `type:"string"` + + // The engine version of the DB engine. + EngineVersion *string `type:"string"` + + // If an installation media failure occurred, the cause of the failure. + FailureCause *InstallationMediaFailureCause `type:"structure"` + + // The installation media ID. + InstallationMediaId *string `type:"string"` + + // The path to the installation media for the operating system associated with + // the DB engine. 
+ OSInstallationMediaPath *string `type:"string"` + + // The status of the installation media. + Status *string `type:"string"` +} + +// String returns the string representation +func (s ImportInstallationMediaOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportInstallationMediaOutput) GoString() string { + return s.String() +} + +// SetCustomAvailabilityZoneId sets the CustomAvailabilityZoneId field's value. +func (s *ImportInstallationMediaOutput) SetCustomAvailabilityZoneId(v string) *ImportInstallationMediaOutput { + s.CustomAvailabilityZoneId = &v + return s +} + +// SetEngine sets the Engine field's value. +func (s *ImportInstallationMediaOutput) SetEngine(v string) *ImportInstallationMediaOutput { + s.Engine = &v + return s +} + +// SetEngineInstallationMediaPath sets the EngineInstallationMediaPath field's value. +func (s *ImportInstallationMediaOutput) SetEngineInstallationMediaPath(v string) *ImportInstallationMediaOutput { + s.EngineInstallationMediaPath = &v + return s +} + +// SetEngineVersion sets the EngineVersion field's value. +func (s *ImportInstallationMediaOutput) SetEngineVersion(v string) *ImportInstallationMediaOutput { + s.EngineVersion = &v + return s +} + +// SetFailureCause sets the FailureCause field's value. +func (s *ImportInstallationMediaOutput) SetFailureCause(v *InstallationMediaFailureCause) *ImportInstallationMediaOutput { + s.FailureCause = v + return s +} + +// SetInstallationMediaId sets the InstallationMediaId field's value. +func (s *ImportInstallationMediaOutput) SetInstallationMediaId(v string) *ImportInstallationMediaOutput { + s.InstallationMediaId = &v + return s +} + +// SetOSInstallationMediaPath sets the OSInstallationMediaPath field's value. +func (s *ImportInstallationMediaOutput) SetOSInstallationMediaPath(v string) *ImportInstallationMediaOutput { + s.OSInstallationMediaPath = &v + return s +} + +// SetStatus sets the Status field's value. 
+func (s *ImportInstallationMediaOutput) SetStatus(v string) *ImportInstallationMediaOutput { + s.Status = &v + return s +} + +// Contains the installation media for on-premises, bring your own media (BYOM) +// DB engines, such as Microsoft SQL Server. +type InstallationMedia struct { + _ struct{} `type:"structure"` + + // The custom Availability Zone (AZ) that contains the installation media. + CustomAvailabilityZoneId *string `type:"string"` + + // The DB engine. + Engine *string `type:"string"` + + // The path to the installation media for the DB engine. + EngineInstallationMediaPath *string `type:"string"` + + // The engine version of the DB engine. + EngineVersion *string `type:"string"` + + // If an installation media failure occurred, the cause of the failure. + FailureCause *InstallationMediaFailureCause `type:"structure"` + + // The installation media ID. + InstallationMediaId *string `type:"string"` + + // The path to the installation media for the operating system associated with + // the DB engine. + OSInstallationMediaPath *string `type:"string"` + + // The status of the installation media. + Status *string `type:"string"` +} + +// String returns the string representation +func (s InstallationMedia) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstallationMedia) GoString() string { + return s.String() +} + +// SetCustomAvailabilityZoneId sets the CustomAvailabilityZoneId field's value. +func (s *InstallationMedia) SetCustomAvailabilityZoneId(v string) *InstallationMedia { + s.CustomAvailabilityZoneId = &v + return s +} + +// SetEngine sets the Engine field's value. +func (s *InstallationMedia) SetEngine(v string) *InstallationMedia { + s.Engine = &v + return s +} + +// SetEngineInstallationMediaPath sets the EngineInstallationMediaPath field's value. 
+func (s *InstallationMedia) SetEngineInstallationMediaPath(v string) *InstallationMedia { + s.EngineInstallationMediaPath = &v + return s +} + +// SetEngineVersion sets the EngineVersion field's value. +func (s *InstallationMedia) SetEngineVersion(v string) *InstallationMedia { + s.EngineVersion = &v + return s +} + +// SetFailureCause sets the FailureCause field's value. +func (s *InstallationMedia) SetFailureCause(v *InstallationMediaFailureCause) *InstallationMedia { + s.FailureCause = v + return s +} + +// SetInstallationMediaId sets the InstallationMediaId field's value. +func (s *InstallationMedia) SetInstallationMediaId(v string) *InstallationMedia { + s.InstallationMediaId = &v + return s +} + +// SetOSInstallationMediaPath sets the OSInstallationMediaPath field's value. +func (s *InstallationMedia) SetOSInstallationMediaPath(v string) *InstallationMedia { + s.OSInstallationMediaPath = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *InstallationMedia) SetStatus(v string) *InstallationMedia { + s.Status = &v + return s +} + +// Contains the cause of an installation media failure. Installation media is +// used for on-premises, bring your own media (BYOM) DB engines, such as Microsoft +// SQL Server. +type InstallationMediaFailureCause struct { + _ struct{} `type:"structure"` + + // The reason that an installation media import failed. + Message *string `type:"string"` +} + +// String returns the string representation +func (s InstallationMediaFailureCause) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstallationMediaFailureCause) GoString() string { + return s.String() +} + +// SetMessage sets the Message field's value. 
+func (s *InstallationMediaFailureCause) SetMessage(v string) *InstallationMediaFailureCause { + s.Message = &v + return s +} + type ListTagsForResourceInput struct { _ struct{} `type:"structure"` @@ -35322,6 +36859,80 @@ func (s *VpcSecurityGroupMembership) SetVpcSecurityGroupId(v string) *VpcSecurit return s } +// Information about the virtual private network (VPN) between the VMware vSphere +// cluster and the AWS website. +// +// For more information about RDS on VMware, see the RDS on VMware User Guide. +// (https://docs.aws.amazon.com/AmazonRDS/latest/RDSonVMwareUserGuide/rds-on-vmware.html) +type VpnDetails struct { + _ struct{} `type:"structure"` + + // The IP address of network traffic from AWS to your on-premises data center. + VpnGatewayIp *string `type:"string"` + + // The ID of the VPN. + VpnId *string `type:"string"` + + // The name of the VPN. + VpnName *string `type:"string"` + + // The preshared key (PSK) for the VPN. + VpnPSK *string `type:"string" sensitive:"true"` + + // The state of the VPN. + VpnState *string `type:"string"` + + // The IP address of network traffic from your on-premises data center. A custom + // AZ receives the network traffic. + VpnTunnelOriginatorIP *string `type:"string"` +} + +// String returns the string representation +func (s VpnDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VpnDetails) GoString() string { + return s.String() +} + +// SetVpnGatewayIp sets the VpnGatewayIp field's value. +func (s *VpnDetails) SetVpnGatewayIp(v string) *VpnDetails { + s.VpnGatewayIp = &v + return s +} + +// SetVpnId sets the VpnId field's value. +func (s *VpnDetails) SetVpnId(v string) *VpnDetails { + s.VpnId = &v + return s +} + +// SetVpnName sets the VpnName field's value. +func (s *VpnDetails) SetVpnName(v string) *VpnDetails { + s.VpnName = &v + return s +} + +// SetVpnPSK sets the VpnPSK field's value. 
+func (s *VpnDetails) SetVpnPSK(v string) *VpnDetails { + s.VpnPSK = &v + return s +} + +// SetVpnState sets the VpnState field's value. +func (s *VpnDetails) SetVpnState(v string) *VpnDetails { + s.VpnState = &v + return s +} + +// SetVpnTunnelOriginatorIP sets the VpnTunnelOriginatorIP field's value. +func (s *VpnDetails) SetVpnTunnelOriginatorIP(v string) *VpnDetails { + s.VpnTunnelOriginatorIP = &v + return s +} + const ( // ActivityStreamModeSync is a ActivityStreamMode enum value ActivityStreamModeSync = "sync" diff --git a/vendor/github.com/aws/aws-sdk-go/service/rds/doc.go b/vendor/github.com/aws/aws-sdk-go/service/rds/doc.go index bf5140b1477..6a263d8704d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/rds/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/service/rds/doc.go @@ -6,9 +6,9 @@ // // Amazon Relational Database Service (Amazon RDS) is a web service that makes // it easier to set up, operate, and scale a relational database in the cloud. -// It provides cost-efficient, resizable capacity for an industry-standard relational -// database and manages common database administration tasks, freeing up developers -// to focus on what makes their applications and businesses unique. +// It provides cost-efficient, resizeable capacity for an industry-standard +// relational database and manages common database administration tasks, freeing +// up developers to focus on what makes their applications and businesses unique. // // Amazon RDS gives you access to the capabilities of a MySQL, MariaDB, PostgreSQL, // Microsoft SQL Server, Oracle, or Amazon Aurora database server. 
These capabilities diff --git a/vendor/github.com/aws/aws-sdk-go/service/rds/errors.go b/vendor/github.com/aws/aws-sdk-go/service/rds/errors.go index 86ee24482b0..680626728f7 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/rds/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/rds/errors.go @@ -7,18 +7,18 @@ const ( // ErrCodeAuthorizationAlreadyExistsFault for service response error code // "AuthorizationAlreadyExists". // - // The specified CIDRIP or Amazon EC2 security group is already authorized for - // the specified DB security group. + // The specified CIDR IP range or Amazon EC2 security group is already authorized + // for the specified DB security group. ErrCodeAuthorizationAlreadyExistsFault = "AuthorizationAlreadyExists" // ErrCodeAuthorizationNotFoundFault for service response error code // "AuthorizationNotFound". // - // The specified CIDRIP or Amazon EC2 security group isn't authorized for the - // specified DB security group. + // The specified CIDR IP range or Amazon EC2 security group might not be authorized + // for the specified DB security group. // - // RDS also may not be authorized by using IAM to perform necessary actions - // on your behalf. + // Or, RDS might not be authorized to perform necessary actions using IAM on + // your behalf. ErrCodeAuthorizationNotFoundFault = "AuthorizationNotFound" // ErrCodeAuthorizationQuotaExceededFault for service response error code @@ -37,6 +37,26 @@ const ( // CertificateIdentifier doesn't refer to an existing certificate. ErrCodeCertificateNotFoundFault = "CertificateNotFound" + // ErrCodeCustomAvailabilityZoneAlreadyExistsFault for service response error code + // "CustomAvailabilityZoneAlreadyExists". + // + // CustomAvailabilityZoneName is already used by an existing custom Availability + // Zone. 
+ ErrCodeCustomAvailabilityZoneAlreadyExistsFault = "CustomAvailabilityZoneAlreadyExists" + + // ErrCodeCustomAvailabilityZoneNotFoundFault for service response error code + // "CustomAvailabilityZoneNotFound". + // + // CustomAvailabilityZoneId doesn't refer to an existing custom Availability + // Zone identifier. + ErrCodeCustomAvailabilityZoneNotFoundFault = "CustomAvailabilityZoneNotFound" + + // ErrCodeCustomAvailabilityZoneQuotaExceededFault for service response error code + // "CustomAvailabilityZoneQuotaExceeded". + // + // You have exceeded the maximum number of custom Availability Zones. + ErrCodeCustomAvailabilityZoneQuotaExceededFault = "CustomAvailabilityZoneQuotaExceeded" + // ErrCodeDBClusterAlreadyExistsFault for service response error code // "DBClusterAlreadyExistsFault". // @@ -156,7 +176,7 @@ const ( // ErrCodeDBInstanceRoleNotFoundFault for service response error code // "DBInstanceRoleNotFound". // - // The specified RoleArn value doesn't match the specifed feature for the DB + // The specified RoleArn value doesn't match the specified feature for the DB // instance. ErrCodeDBInstanceRoleNotFoundFault = "DBInstanceRoleNotFound" @@ -300,6 +320,18 @@ const ( // "GlobalClusterQuotaExceededFault". ErrCodeGlobalClusterQuotaExceededFault = "GlobalClusterQuotaExceededFault" + // ErrCodeInstallationMediaAlreadyExistsFault for service response error code + // "InstallationMediaAlreadyExists". + // + // The specified installation media has already been imported. + ErrCodeInstallationMediaAlreadyExistsFault = "InstallationMediaAlreadyExists" + + // ErrCodeInstallationMediaNotFoundFault for service response error code + // "InstallationMediaNotFound". + // + // InstallationMediaID doesn't refer to an existing installation media. + ErrCodeInstallationMediaNotFoundFault = "InstallationMediaNotFound" + // ErrCodeInstanceQuotaExceededFault for service response error code // "InstanceQuotaExceeded". 
// diff --git a/vendor/github.com/aws/aws-sdk-go/service/rds/service.go b/vendor/github.com/aws/aws-sdk-go/service/rds/service.go index f2d0efaf7d0..3ae523764c1 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/rds/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/rds/service.go @@ -46,11 +46,11 @@ const ( // svc := rds.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *RDS { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *RDS { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *RDS { svc := &RDS{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2014-10-31", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/redshift/service.go b/vendor/github.com/aws/aws-sdk-go/service/redshift/service.go index a750d141c62..fa2fea7a89f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/redshift/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/redshift/service.go @@ -46,11 +46,11 @@ const ( // svc := redshift.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Redshift { c := p.ClientConfig(EndpointsID, cfgs...) 
- return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Redshift { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Redshift { svc := &Redshift{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2012-12-01", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/resourcegroups/service.go b/vendor/github.com/aws/aws-sdk-go/service/resourcegroups/service.go index 46a19ff2316..82f50ca25d7 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/resourcegroups/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/resourcegroups/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *ResourceGroups { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "resource-groups" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ResourceGroups { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ResourceGroups { svc := &ResourceGroups{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-11-27", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/route53/service.go b/vendor/github.com/aws/aws-sdk-go/service/route53/service.go index dd22cb2cd84..391c3e28615 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/route53/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/route53/service.go @@ -46,11 +46,11 @@ const ( // svc := route53.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Route53 { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Route53 { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Route53 { svc := &Route53{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2013-04-01", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/route53resolver/service.go b/vendor/github.com/aws/aws-sdk-go/service/route53resolver/service.go index 367e933ebb9..7b8dd4bf715 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/route53resolver/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/route53resolver/service.go @@ -46,11 +46,11 @@ const ( // svc := route53resolver.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Route53Resolver { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Route53Resolver { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Route53Resolver { svc := &Route53Resolver{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-04-01", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go index b4a4e8c4ad7..91bf5225ddf 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go @@ -22464,6 +22464,41 @@ func (s SSES3) GoString() string { return s.String() } +type ScanRange struct { + _ struct{} `type:"structure"` + + // Specifies the end of the byte range. This parameter is optional. Valid values: + // non-negative integers. The default value is one less than the size of the + // object being queried. + End *int64 `type:"long"` + + // Specifies the start of the byte range. This parameter is optional. Valid + // values: non-negative integers. The default value is 0. + Start *int64 `type:"long"` +} + +// String returns the string representation +func (s ScanRange) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScanRange) GoString() string { + return s.String() +} + +// SetEnd sets the End field's value. +func (s *ScanRange) SetEnd(v int64) *ScanRange { + s.End = &v + return s +} + +// SetStart sets the Start field's value. +func (s *ScanRange) SetStart(v int64) *ScanRange { + s.Start = &v + return s +} + // SelectObjectContentEventStream provides handling of EventStreams for // the SelectObjectContent API. 
// @@ -22503,6 +22538,8 @@ type SelectObjectContentEventStream struct { // may result in resource leaks. func (es *SelectObjectContentEventStream) Close() (err error) { es.Reader.Close() + es.StreamCloser.Close() + return es.Err() } @@ -22512,8 +22549,6 @@ func (es *SelectObjectContentEventStream) Err() error { if err := es.Reader.Err(); err != nil { return err } - es.StreamCloser.Close() - return nil } @@ -22738,6 +22773,12 @@ type SelectObjectContentInput struct { // The SSE Customer Key MD5. For more information, see Server-Side Encryption // (Using Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Specifies the byte range of the object to get the records from. A record + // is processed when its first byte is contained by the range. This parameter + // is optional, but when specified, it must not be empty. See RFC 2616, Section + // 14.35.1 about how to specify the start and end of the range. + ScanRange *ScanRange `type:"structure"` } // String returns the string representation @@ -22858,6 +22899,12 @@ func (s *SelectObjectContentInput) SetSSECustomerKeyMD5(v string) *SelectObjectC return s } +// SetScanRange sets the ScanRange field's value. 
+func (s *SelectObjectContentInput) SetScanRange(v *ScanRange) *SelectObjectContentInput { + s.ScanRange = v + return s +} + type SelectObjectContentOutput struct { _ struct{} `type:"structure" payload:"Payload"` diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go index d17dcc9dadc..07e1297371b 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go @@ -46,11 +46,11 @@ const ( // svc := s3.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3 { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *S3 { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *S3 { svc := &S3{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2006-03-01", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3control/service.go b/vendor/github.com/aws/aws-sdk-go/service/s3control/service.go index 377c9d55d55..827741ce235 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3control/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3control/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3Control { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "s3" } - return 
newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *S3Control { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *S3Control { svc := &S3Control{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-08-20", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/sagemaker/api.go b/vendor/github.com/aws/aws-sdk-go/service/sagemaker/api.go index ed54c0f3258..29e70ae97eb 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sagemaker/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sagemaker/api.go @@ -26538,6 +26538,15 @@ const ( // NotebookInstanceAcceleratorTypeMlEia1Xlarge is a NotebookInstanceAcceleratorType enum value NotebookInstanceAcceleratorTypeMlEia1Xlarge = "ml.eia1.xlarge" + + // NotebookInstanceAcceleratorTypeMlEia2Medium is a NotebookInstanceAcceleratorType enum value + NotebookInstanceAcceleratorTypeMlEia2Medium = "ml.eia2.medium" + + // NotebookInstanceAcceleratorTypeMlEia2Large is a NotebookInstanceAcceleratorType enum value + NotebookInstanceAcceleratorTypeMlEia2Large = "ml.eia2.large" + + // NotebookInstanceAcceleratorTypeMlEia2Xlarge is a NotebookInstanceAcceleratorType enum value + NotebookInstanceAcceleratorTypeMlEia2Xlarge = "ml.eia2.xlarge" ) const ( @@ -26666,6 +26675,15 @@ const ( // ProductionVariantAcceleratorTypeMlEia1Xlarge is a ProductionVariantAcceleratorType enum value ProductionVariantAcceleratorTypeMlEia1Xlarge = "ml.eia1.xlarge" + + 
// ProductionVariantAcceleratorTypeMlEia2Medium is a ProductionVariantAcceleratorType enum value + ProductionVariantAcceleratorTypeMlEia2Medium = "ml.eia2.medium" + + // ProductionVariantAcceleratorTypeMlEia2Large is a ProductionVariantAcceleratorType enum value + ProductionVariantAcceleratorTypeMlEia2Large = "ml.eia2.large" + + // ProductionVariantAcceleratorTypeMlEia2Xlarge is a ProductionVariantAcceleratorType enum value + ProductionVariantAcceleratorTypeMlEia2Xlarge = "ml.eia2.xlarge" ) const ( @@ -26714,6 +26732,24 @@ const ( // ProductionVariantInstanceTypeMlM524xlarge is a ProductionVariantInstanceType enum value ProductionVariantInstanceTypeMlM524xlarge = "ml.m5.24xlarge" + // ProductionVariantInstanceTypeMlM5dLarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlM5dLarge = "ml.m5d.large" + + // ProductionVariantInstanceTypeMlM5dXlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlM5dXlarge = "ml.m5d.xlarge" + + // ProductionVariantInstanceTypeMlM5d2xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlM5d2xlarge = "ml.m5d.2xlarge" + + // ProductionVariantInstanceTypeMlM5d4xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlM5d4xlarge = "ml.m5d.4xlarge" + + // ProductionVariantInstanceTypeMlM5d12xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlM5d12xlarge = "ml.m5d.12xlarge" + + // ProductionVariantInstanceTypeMlM5d24xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlM5d24xlarge = "ml.m5d.24xlarge" + // ProductionVariantInstanceTypeMlC4Large is a ProductionVariantInstanceType enum value ProductionVariantInstanceTypeMlC4Large = "ml.c4.large" @@ -26765,6 +26801,24 @@ const ( // ProductionVariantInstanceTypeMlC518xlarge is a ProductionVariantInstanceType enum value ProductionVariantInstanceTypeMlC518xlarge = "ml.c5.18xlarge" + // 
ProductionVariantInstanceTypeMlC5dLarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlC5dLarge = "ml.c5d.large" + + // ProductionVariantInstanceTypeMlC5dXlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlC5dXlarge = "ml.c5d.xlarge" + + // ProductionVariantInstanceTypeMlC5d2xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlC5d2xlarge = "ml.c5d.2xlarge" + + // ProductionVariantInstanceTypeMlC5d4xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlC5d4xlarge = "ml.c5d.4xlarge" + + // ProductionVariantInstanceTypeMlC5d9xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlC5d9xlarge = "ml.c5d.9xlarge" + + // ProductionVariantInstanceTypeMlC5d18xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlC5d18xlarge = "ml.c5d.18xlarge" + // ProductionVariantInstanceTypeMlG4dnXlarge is a ProductionVariantInstanceType enum value ProductionVariantInstanceTypeMlG4dnXlarge = "ml.g4dn.xlarge" @@ -26800,6 +26854,24 @@ const ( // ProductionVariantInstanceTypeMlR524xlarge is a ProductionVariantInstanceType enum value ProductionVariantInstanceTypeMlR524xlarge = "ml.r5.24xlarge" + + // ProductionVariantInstanceTypeMlR5dLarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlR5dLarge = "ml.r5d.large" + + // ProductionVariantInstanceTypeMlR5dXlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlR5dXlarge = "ml.r5d.xlarge" + + // ProductionVariantInstanceTypeMlR5d2xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlR5d2xlarge = "ml.r5d.2xlarge" + + // ProductionVariantInstanceTypeMlR5d4xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlR5d4xlarge = "ml.r5d.4xlarge" + + // ProductionVariantInstanceTypeMlR5d12xlarge is a ProductionVariantInstanceType enum value + 
ProductionVariantInstanceTypeMlR5d12xlarge = "ml.r5d.12xlarge" + + // ProductionVariantInstanceTypeMlR5d24xlarge is a ProductionVariantInstanceType enum value + ProductionVariantInstanceTypeMlR5d24xlarge = "ml.r5d.24xlarge" ) const ( diff --git a/vendor/github.com/aws/aws-sdk-go/service/sagemaker/service.go b/vendor/github.com/aws/aws-sdk-go/service/sagemaker/service.go index 7ae1df73414..ae95a939639 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sagemaker/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sagemaker/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *SageMaker { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "sagemaker" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *SageMaker { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *SageMaker { svc := &SageMaker{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-07-24", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/service.go b/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/service.go index c4758e96dac..eeca2d9fd68 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/secretsmanager/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *SecretsManager { if 
c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "secretsmanager" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *SecretsManager { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *SecretsManager { svc := &SecretsManager{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-10-17", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/securityhub/service.go b/vendor/github.com/aws/aws-sdk-go/service/securityhub/service.go index 113ce37f3e1..cdb48ddf0c8 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/securityhub/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/securityhub/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *SecurityHub { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "securityhub" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *SecurityHub { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *SecurityHub { svc := &SecurityHub{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-10-26", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/serverlessapplicationrepository/service.go b/vendor/github.com/aws/aws-sdk-go/service/serverlessapplicationrepository/service.go index 5702ab93313..7ceaec7a2fd 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/serverlessapplicationrepository/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/serverlessapplicationrepository/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *ServerlessApplicationRep if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "serverlessrepo" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ServerlessApplicationRepository { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ServerlessApplicationRepository { svc := &ServerlessApplicationRepository{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-09-08", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/servicecatalog/service.go b/vendor/github.com/aws/aws-sdk-go/service/servicecatalog/service.go index f15a5b8024d..718d9a1486d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/servicecatalog/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/servicecatalog/service.go @@ -46,11 +46,11 @@ const ( // svc := servicecatalog.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *ServiceCatalog { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ServiceCatalog { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ServiceCatalog { svc := &ServiceCatalog{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-12-10", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/servicediscovery/service.go b/vendor/github.com/aws/aws-sdk-go/service/servicediscovery/service.go index 3463e12c241..77d77772c90 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/servicediscovery/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/servicediscovery/service.go @@ -46,11 +46,11 @@ const ( // svc := servicediscovery.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *ServiceDiscovery { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ServiceDiscovery { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ServiceDiscovery { svc := &ServiceDiscovery{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2017-03-14", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/servicequotas/service.go b/vendor/github.com/aws/aws-sdk-go/service/servicequotas/service.go index 6404a922552..4c064181c98 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/servicequotas/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/servicequotas/service.go @@ -46,11 +46,11 @@ const ( // svc := servicequotas.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *ServiceQuotas { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *ServiceQuotas { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ServiceQuotas { svc := &ServiceQuotas{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2019-06-24", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/ses/service.go b/vendor/github.com/aws/aws-sdk-go/service/ses/service.go index 0e33b771f53..09028e10453 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ses/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ses/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *SES { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "ses" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *SES { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *SES { svc := &SES{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2010-12-01", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/sfn/service.go b/vendor/github.com/aws/aws-sdk-go/service/sfn/service.go index 2436268f075..f21c6d9d82a 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sfn/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sfn/service.go @@ -46,11 +46,11 @@ const ( // svc := sfn.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *SFN { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *SFN { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *SFN { svc := &SFN{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2016-11-23", JSONVersion: "1.0", diff --git a/vendor/github.com/aws/aws-sdk-go/service/shield/service.go b/vendor/github.com/aws/aws-sdk-go/service/shield/service.go index b7a62ef92f6..499dea15df5 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/shield/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/shield/service.go @@ -46,11 +46,11 @@ const ( // svc := shield.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *Shield { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Shield { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Shield { svc := &Shield{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2016-06-02", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/simpledb/service.go b/vendor/github.com/aws/aws-sdk-go/service/simpledb/service.go index d4de27413cb..75fd3d60108 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/simpledb/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/simpledb/service.go @@ -47,11 +47,11 @@ const ( // svc := simpledb.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *SimpleDB { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *SimpleDB { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *SimpleDB { svc := &SimpleDB{ Client: client.New( cfg, @@ -60,6 +60,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2009-04-15", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/sns/service.go b/vendor/github.com/aws/aws-sdk-go/service/sns/service.go index 96d7c8ba05c..aa8aff7d6e1 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sns/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sns/service.go @@ -46,11 +46,11 @@ const ( // svc := sns.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *SNS { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *SNS { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *SNS { svc := &SNS{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2010-03-31", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/sqs/service.go b/vendor/github.com/aws/aws-sdk-go/service/sqs/service.go index d463ecf0ddb..7bac89c4ae9 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sqs/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sqs/service.go @@ -46,11 +46,11 @@ const ( // svc := sqs.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *SQS { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *SQS { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *SQS { svc := &SQS{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2012-11-05", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssm/service.go b/vendor/github.com/aws/aws-sdk-go/service/ssm/service.go index 9a6b8f71c22..c66bfba90cc 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ssm/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ssm/service.go @@ -46,11 +46,11 @@ const ( // svc := ssm.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *SSM { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *SSM { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *SSM { svc := &SSM{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2014-11-06", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/storagegateway/service.go b/vendor/github.com/aws/aws-sdk-go/service/storagegateway/service.go index 9a0c08f6962..1e4f312be8c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/storagegateway/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/storagegateway/service.go @@ -46,11 +46,11 @@ const ( // svc := storagegateway.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *StorageGateway { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *StorageGateway { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *StorageGateway { svc := &StorageGateway{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2013-06-30", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go index eb0a6a417ef..9c5ed454536 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -676,9 +676,9 @@ func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessag // // Returned Error Codes: // * ErrCodeInvalidAuthorizationMessageException "InvalidAuthorizationMessageException" -// The error returned if the message passed to DecodeAuthorizationMessage was -// invalid. This can happen if the token contains invalid characters, such as -// linebreaks. +// This error is returned if the message passed to DecodeAuthorizationMessage +// was invalid. This can happen if the token contains invalid characters, such +// as linebreaks. 
// // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) { diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go index 41ea09c356c..a3e378edad3 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go @@ -34,9 +34,9 @@ const ( // ErrCodeInvalidAuthorizationMessageException for service response error code // "InvalidAuthorizationMessageException". // - // The error returned if the message passed to DecodeAuthorizationMessage was - // invalid. This can happen if the token contains invalid characters, such as - // linebreaks. + // This error is returned if the message passed to DecodeAuthorizationMessage + // was invalid. This can happen if the token contains invalid characters, such + // as linebreaks. ErrCodeInvalidAuthorizationMessageException = "InvalidAuthorizationMessageException" // ErrCodeInvalidIdentityTokenException for service response error code diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go index 185c914d1b3..2c3c3d2c1ed 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go @@ -46,11 +46,11 @@ const ( // svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *STS { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *STS { svc := &STS{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2011-06-15", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/swf/service.go b/vendor/github.com/aws/aws-sdk-go/service/swf/service.go index 014d89a5241..c30e411bd0f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/swf/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/swf/service.go @@ -46,11 +46,11 @@ const ( // svc := swf.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *SWF { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *SWF { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *SWF { svc := &SWF{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2012-01-25", JSONVersion: "1.0", diff --git a/vendor/github.com/aws/aws-sdk-go/service/transfer/api.go b/vendor/github.com/aws/aws-sdk-go/service/transfer/api.go index 575135be927..8088bea141f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/transfer/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/transfer/api.go @@ -368,6 +368,11 @@ func (c *Transfer) DeleteSshPublicKeyRequest(input *DeleteSshPublicKeyInput) (re // This exception is thrown when a resource is not found by the AWS Transfer // for SFTP service. // +// * ErrCodeThrottlingException "ThrottlingException" +// The request was denied due to request throttling. +// +// HTTP Status Code: 400 +// // See also, https://docs.aws.amazon.com/goto/WebAPI/transfer-2018-11-05/DeleteSshPublicKey func (c *Transfer) DeleteSshPublicKey(input *DeleteSshPublicKeyInput) (*DeleteSshPublicKeyOutput, error) { req, out := c.DeleteSshPublicKeyRequest(input) @@ -746,6 +751,11 @@ func (c *Transfer) ImportSshPublicKeyRequest(input *ImportSshPublicKeyInput) (re // This exception is thrown when a resource is not found by the AWS Transfer // for SFTP service. // +// * ErrCodeThrottlingException "ThrottlingException" +// The request was denied due to request throttling. 
+// +// HTTP Status Code: 400 +// // See also, https://docs.aws.amazon.com/goto/WebAPI/transfer-2018-11-05/ImportSshPublicKey func (c *Transfer) ImportSshPublicKey(input *ImportSshPublicKeyInput) (*ImportSshPublicKeyOutput, error) { req, out := c.ImportSshPublicKeyRequest(input) @@ -1286,6 +1296,11 @@ func (c *Transfer) StartServerRequest(input *StartServerInput) (req *request.Req // This exception is thrown when a resource is not found by the AWS Transfer // for SFTP service. // +// * ErrCodeThrottlingException "ThrottlingException" +// The request was denied due to request throttling. +// +// HTTP Status Code: 400 +// // See also, https://docs.aws.amazon.com/goto/WebAPI/transfer-2018-11-05/StartServer func (c *Transfer) StartServer(input *StartServerInput) (*StartServerOutput, error) { req, out := c.StartServerRequest(input) @@ -1387,6 +1402,11 @@ func (c *Transfer) StopServerRequest(input *StopServerInput) (req *request.Reque // This exception is thrown when a resource is not found by the AWS Transfer // for SFTP service. // +// * ErrCodeThrottlingException "ThrottlingException" +// The request was denied due to request throttling. +// +// HTTP Status Code: 400 +// // See also, https://docs.aws.amazon.com/goto/WebAPI/transfer-2018-11-05/StopServer func (c *Transfer) StopServer(input *StopServerInput) (*StopServerOutput, error) { req, out := c.StopServerRequest(input) @@ -1477,6 +1497,10 @@ func (c *Transfer) TagResourceRequest(input *TagResourceInput) (req *request.Req // * ErrCodeInvalidRequestException "InvalidRequestException" // This exception is thrown when the client submits a malformed request. // +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// This exception is thrown when a resource is not found by the AWS Transfer +// for SFTP service. 
+// // See also, https://docs.aws.amazon.com/goto/WebAPI/transfer-2018-11-05/TagResource func (c *Transfer) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { req, out := c.TagResourceRequest(input) @@ -1661,6 +1685,10 @@ func (c *Transfer) UntagResourceRequest(input *UntagResourceInput) (req *request // * ErrCodeInvalidRequestException "InvalidRequestException" // This exception is thrown when the client submits a malformed request. // +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// This exception is thrown when a resource is not found by the AWS Transfer +// for SFTP service. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/transfer-2018-11-05/UntagResource func (c *Transfer) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { req, out := c.UntagResourceRequest(input) @@ -1750,10 +1778,18 @@ func (c *Transfer) UpdateServerRequest(input *UpdateServerInput) (req *request.R // * ErrCodeInvalidRequestException "InvalidRequestException" // This exception is thrown when the client submits a malformed request. // +// * ErrCodeResourceExistsException "ResourceExistsException" +// The requested resource does not exist. +// // * ErrCodeResourceNotFoundException "ResourceNotFoundException" // This exception is thrown when a resource is not found by the AWS Transfer // for SFTP service. // +// * ErrCodeThrottlingException "ThrottlingException" +// The request was denied due to request throttling. +// +// HTTP Status Code: 400 +// // See also, https://docs.aws.amazon.com/goto/WebAPI/transfer-2018-11-05/UpdateServer func (c *Transfer) UpdateServer(input *UpdateServerInput) (*UpdateServerOutput, error) { req, out := c.UpdateServerRequest(input) @@ -1848,6 +1884,11 @@ func (c *Transfer) UpdateUserRequest(input *UpdateUserInput) (req *request.Reque // This exception is thrown when a resource is not found by the AWS Transfer // for SFTP service. 
// +// * ErrCodeThrottlingException "ThrottlingException" +// The request was denied due to request throttling. +// +// HTTP Status Code: 400 +// // See also, https://docs.aws.amazon.com/goto/WebAPI/transfer-2018-11-05/UpdateUser func (c *Transfer) UpdateUser(input *UpdateUserInput) (*UpdateUserOutput, error) { req, out := c.UpdateUserRequest(input) @@ -1890,7 +1931,7 @@ type CreateServerInput struct { // to a new AWS SFTP server, don't update the host key. Accidentally changing // a server's host key can be disruptive. // - // For more information, see "https://docs.aws.amazon.com/transfer/latest/userguide/change-host-key" + // For more information, see "https://alpha-docs-aws.amazon.com/transfer/latest/userguide/configuring-servers.html#change-host-key" // in the AWS SFTP User Guide. HostKey *string `type:"string" sensitive:"true"` @@ -1910,7 +1951,7 @@ type CreateServerInput struct { // A value that allows the service to write your SFTP users' activity to your // Amazon CloudWatch logs for monitoring and auditing purposes. - LoggingRole *string `type:"string"` + LoggingRole *string `min:"20" type:"string"` // Key-value pairs that can be used to group and search for servers. Tags []*Tag `min:"1" type:"list"` @@ -1929,9 +1970,22 @@ func (s CreateServerInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. 
func (s *CreateServerInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "CreateServerInput"} + if s.LoggingRole != nil && len(*s.LoggingRole) < 20 { + invalidParams.Add(request.NewErrParamMinLen("LoggingRole", 20)) + } if s.Tags != nil && len(s.Tags) < 1 { invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) } + if s.EndpointDetails != nil { + if err := s.EndpointDetails.Validate(); err != nil { + invalidParams.AddNested("EndpointDetails", err.(request.ErrInvalidParams)) + } + } + if s.IdentityProviderDetails != nil { + if err := s.IdentityProviderDetails.Validate(); err != nil { + invalidParams.AddNested("IdentityProviderDetails", err.(request.ErrInvalidParams)) + } + } if s.Tags != nil { for i, v := range s.Tags { if v == nil { @@ -1997,7 +2051,7 @@ type CreateServerOutput struct { // The service-assigned ID of the SFTP server that is created. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` } // String returns the string representation @@ -2020,9 +2074,33 @@ type CreateUserInput struct { _ struct{} `type:"structure"` // The landing directory (folder) for a user when they log in to the server - // using their SFTP client. An example is /home/username . + // using their SFTP client. + // + // An example is /home/username. HomeDirectory *string `type:"string"` + // Logical directory mappings that specify what S3 paths and keys should be + // visible to your user and how you want to make them visible. You will need + // to specify the "Entry" and "Target" pair, where Entry shows how the path + // is made visible and Target is the actual S3 path. If you only specify a target, + // it will be displayed as is. You will need to also make sure that your AWS + // IAM Role provides access to paths in Target. The following is an example. 
+ // + // '[ "/bucket2/documentation", { "Entry": "your-personal-report.pdf", "Target": + // "/bucket3/customized-reports/${transfer:UserName}.pdf" } ]' + // + // In most cases, you can use this value instead of the scope down policy to + // lock your user down to the designated home directory ("chroot"). To do this, + // you can set Entry to '/' and set Target to the HomeDirectory parameter value. + HomeDirectoryMappings []*HomeDirectoryMapEntry `min:"1" type:"list"` + + // The type of landing directory (folder) you want your users' home directory + // to be when they log into the SFTP server. If you set it to PATH, the user + // will see the absolute Amazon S3 bucket paths as is in their SFTP clients. + // If you set it LOGICAL, you will need to provide mappings in the HomeDirectoryMappings + // for how you want to make S3 paths visible to your user. + HomeDirectoryType *string `type:"string" enum:"HomeDirectoryType"` + // A scope-down policy for your user so you can use the same IAM role across // multiple users. This policy scopes down user access to portions of their // Amazon S3 bucket. Variables that you can use inside this policy include ${Transfer:UserName}, @@ -2047,13 +2125,13 @@ type CreateUserInput struct { // SFTP user's transfer requests. // // Role is a required field - Role *string `type:"string" required:"true"` + Role *string `min:"20" type:"string" required:"true"` // A system-assigned unique identifier for an SFTP server instance. This is // the specific SFTP server that you added your user to. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` // The public portion of the Secure Shell (SSH) key used to authenticate the // user to the SFTP server. @@ -2069,7 +2147,7 @@ type CreateUserInput struct { // underscore, and hyphen. The user name can't start with a hyphen. 
// // UserName is a required field - UserName *string `type:"string" required:"true"` + UserName *string `min:"3" type:"string" required:"true"` } // String returns the string representation @@ -2085,18 +2163,40 @@ func (s CreateUserInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *CreateUserInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "CreateUserInput"} + if s.HomeDirectoryMappings != nil && len(s.HomeDirectoryMappings) < 1 { + invalidParams.Add(request.NewErrParamMinLen("HomeDirectoryMappings", 1)) + } if s.Role == nil { invalidParams.Add(request.NewErrParamRequired("Role")) } + if s.Role != nil && len(*s.Role) < 20 { + invalidParams.Add(request.NewErrParamMinLen("Role", 20)) + } if s.ServerId == nil { invalidParams.Add(request.NewErrParamRequired("ServerId")) } + if s.ServerId != nil && len(*s.ServerId) < 19 { + invalidParams.Add(request.NewErrParamMinLen("ServerId", 19)) + } if s.Tags != nil && len(s.Tags) < 1 { invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) } if s.UserName == nil { invalidParams.Add(request.NewErrParamRequired("UserName")) } + if s.UserName != nil && len(*s.UserName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 3)) + } + if s.HomeDirectoryMappings != nil { + for i, v := range s.HomeDirectoryMappings { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "HomeDirectoryMappings", i), err.(request.ErrInvalidParams)) + } + } + } if s.Tags != nil { for i, v := range s.Tags { if v == nil { @@ -2120,6 +2220,18 @@ func (s *CreateUserInput) SetHomeDirectory(v string) *CreateUserInput { return s } +// SetHomeDirectoryMappings sets the HomeDirectoryMappings field's value. 
+func (s *CreateUserInput) SetHomeDirectoryMappings(v []*HomeDirectoryMapEntry) *CreateUserInput { + s.HomeDirectoryMappings = v + return s +} + +// SetHomeDirectoryType sets the HomeDirectoryType field's value. +func (s *CreateUserInput) SetHomeDirectoryType(v string) *CreateUserInput { + s.HomeDirectoryType = &v + return s +} + // SetPolicy sets the Policy field's value. func (s *CreateUserInput) SetPolicy(v string) *CreateUserInput { s.Policy = &v @@ -2162,12 +2274,12 @@ type CreateUserOutput struct { // The ID of the SFTP server that the user is attached to. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` // A unique string that identifies a user account associated with an SFTP server. // // UserName is a required field - UserName *string `type:"string" required:"true"` + UserName *string `min:"3" type:"string" required:"true"` } // String returns the string representation @@ -2198,7 +2310,7 @@ type DeleteServerInput struct { // A unique system-assigned identifier for an SFTP server instance. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` } // String returns the string representation @@ -2217,6 +2329,9 @@ func (s *DeleteServerInput) Validate() error { if s.ServerId == nil { invalidParams.Add(request.NewErrParamRequired("ServerId")) } + if s.ServerId != nil && len(*s.ServerId) < 19 { + invalidParams.Add(request.NewErrParamMinLen("ServerId", 19)) + } if invalidParams.Len() > 0 { return invalidParams @@ -2251,17 +2366,17 @@ type DeleteSshPublicKeyInput struct { // server instance that has the user assigned to it. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` // A unique identifier used to reference your user's specific SSH key. 
// // SshPublicKeyId is a required field - SshPublicKeyId *string `type:"string" required:"true"` + SshPublicKeyId *string `min:"21" type:"string" required:"true"` // A unique string that identifies a user whose public key is being deleted. // // UserName is a required field - UserName *string `type:"string" required:"true"` + UserName *string `min:"3" type:"string" required:"true"` } // String returns the string representation @@ -2280,12 +2395,21 @@ func (s *DeleteSshPublicKeyInput) Validate() error { if s.ServerId == nil { invalidParams.Add(request.NewErrParamRequired("ServerId")) } + if s.ServerId != nil && len(*s.ServerId) < 19 { + invalidParams.Add(request.NewErrParamMinLen("ServerId", 19)) + } if s.SshPublicKeyId == nil { invalidParams.Add(request.NewErrParamRequired("SshPublicKeyId")) } + if s.SshPublicKeyId != nil && len(*s.SshPublicKeyId) < 21 { + invalidParams.Add(request.NewErrParamMinLen("SshPublicKeyId", 21)) + } if s.UserName == nil { invalidParams.Add(request.NewErrParamRequired("UserName")) } + if s.UserName != nil && len(*s.UserName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 3)) + } if invalidParams.Len() > 0 { return invalidParams @@ -2332,12 +2456,12 @@ type DeleteUserInput struct { // the user assigned to it. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` // A unique string that identifies a user that is being deleted from the server. 
// // UserName is a required field - UserName *string `type:"string" required:"true"` + UserName *string `min:"3" type:"string" required:"true"` } // String returns the string representation @@ -2356,9 +2480,15 @@ func (s *DeleteUserInput) Validate() error { if s.ServerId == nil { invalidParams.Add(request.NewErrParamRequired("ServerId")) } + if s.ServerId != nil && len(*s.ServerId) < 19 { + invalidParams.Add(request.NewErrParamMinLen("ServerId", 19)) + } if s.UserName == nil { invalidParams.Add(request.NewErrParamRequired("UserName")) } + if s.UserName != nil && len(*s.UserName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 3)) + } if invalidParams.Len() > 0 { return invalidParams @@ -2398,7 +2528,7 @@ type DescribeServerInput struct { // A system-assigned unique identifier for an SFTP server. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` } // String returns the string representation @@ -2417,6 +2547,9 @@ func (s *DescribeServerInput) Validate() error { if s.ServerId == nil { invalidParams.Add(request.NewErrParamRequired("ServerId")) } + if s.ServerId != nil && len(*s.ServerId) < 19 { + invalidParams.Add(request.NewErrParamMinLen("ServerId", 19)) + } if invalidParams.Len() > 0 { return invalidParams @@ -2462,14 +2595,14 @@ type DescribeUserInput struct { // assigned. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` // The name of the user assigned to one or more servers. User names are part // of the sign-in credentials to use the AWS Transfer for SFTP service and perform // file transfer tasks. 
// // UserName is a required field - UserName *string `type:"string" required:"true"` + UserName *string `min:"3" type:"string" required:"true"` } // String returns the string representation @@ -2488,9 +2621,15 @@ func (s *DescribeUserInput) Validate() error { if s.ServerId == nil { invalidParams.Add(request.NewErrParamRequired("ServerId")) } + if s.ServerId != nil && len(*s.ServerId) < 19 { + invalidParams.Add(request.NewErrParamMinLen("ServerId", 19)) + } if s.UserName == nil { invalidParams.Add(request.NewErrParamRequired("UserName")) } + if s.UserName != nil && len(*s.UserName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 3)) + } if invalidParams.Len() > 0 { return invalidParams @@ -2517,7 +2656,7 @@ type DescribeUserOutput struct { // assigned. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` // An array containing the properties of the user account for the ServerID value // that you specified. @@ -2588,11 +2727,11 @@ type DescribedServer struct { // This property is an AWS Identity and Access Management (IAM) entity that // allows the server to turn on Amazon CloudWatch logging for Amazon S3 events. // When set, user activity can be viewed in your CloudWatch logs. - LoggingRole *string `type:"string"` + LoggingRole *string `min:"20" type:"string"` // This property is a unique system-assigned identifier for the SFTP server // that you instantiate. - ServerId *string `type:"string"` + ServerId *string `min:"19" type:"string"` // The condition of the SFTP server for the server that was described. A value // of ONLINE indicates that the server can accept jobs and transfer files. A @@ -2701,9 +2840,32 @@ type DescribedUser struct { // This property specifies the landing directory (or folder), which is the location // that files are written to or read from in an Amazon S3 bucket for the described - // user. An example is /bucket_name/home/username . 
+ // user. An example is /your s3 bucket name/home/username . HomeDirectory *string `type:"string"` + // Logical directory mappings that you specified for what S3 paths and keys + // should be visible to your user and how you want to make them visible. You + // will need to specify the "Entry" and "Target" pair, where Entry shows how + // the path is made visible and Target is the actual S3 path. If you only specify + // a target, it will be displayed as is. You will need to also make sure that + // your AWS IAM Role provides access to paths in Target. + // + // In most cases, you can use this value instead of the scope down policy to + // lock your user down to the designated home directory ("chroot"). To do this, + // you can set Entry to '/' and set Target to the HomeDirectory parameter value. + // + // In most cases, you can use this value instead of the scope down policy to + // lock your user down to the designated home directory ("chroot"). To do this, + // you can set Entry to '/' and set Target to the HomeDirectory parameter value. + HomeDirectoryMappings []*HomeDirectoryMapEntry `min:"1" type:"list"` + + // The type of landing directory (folder) you mapped for your users' to see + // when they log into the SFTP server. If you set it to PATH, the user will + // see the absolute Amazon S3 bucket paths as is in their SFTP clients. If you + // set it LOGICAL, you will need to provide mappings in the HomeDirectoryMappings + // for how you want to make S3 paths visible to your user. + HomeDirectoryType *string `type:"string" enum:"HomeDirectoryType"` + // Specifies the name of the policy in use for the described user. Policy *string `type:"string"` @@ -2713,7 +2875,7 @@ type DescribedUser struct { // into and out of your Amazon S3 bucket or buckets. The IAM role should also // contain a trust relationship that allows the SFTP server to access your resources // when servicing your SFTP user's transfer requests. 
- Role *string `type:"string"` + Role *string `min:"20" type:"string"` // This property contains the public key portion of the Secure Shell (SSH) keys // stored for the described user. @@ -2726,7 +2888,7 @@ type DescribedUser struct { // This property is the name of the user that was requested to be described. // User names are used for authentication purposes. This is the string that // will be used by your user when they log in to your SFTP server. - UserName *string `type:"string"` + UserName *string `min:"3" type:"string"` } // String returns the string representation @@ -2751,6 +2913,18 @@ func (s *DescribedUser) SetHomeDirectory(v string) *DescribedUser { return s } +// SetHomeDirectoryMappings sets the HomeDirectoryMappings field's value. +func (s *DescribedUser) SetHomeDirectoryMappings(v []*HomeDirectoryMapEntry) *DescribedUser { + s.HomeDirectoryMappings = v + return s +} + +// SetHomeDirectoryType sets the HomeDirectoryType field's value. +func (s *DescribedUser) SetHomeDirectoryType(v string) *DescribedUser { + s.HomeDirectoryType = &v + return s +} + // SetPolicy sets the Policy field's value. func (s *DescribedUser) SetPolicy(v string) *DescribedUser { s.Policy = &v @@ -2787,7 +2961,7 @@ type EndpointDetails struct { _ struct{} `type:"structure"` // The ID of the VPC endpoint. - VpcEndpointId *string `type:"string"` + VpcEndpointId *string `min:"22" type:"string"` } // String returns the string representation @@ -2800,12 +2974,78 @@ func (s EndpointDetails) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *EndpointDetails) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EndpointDetails"} + if s.VpcEndpointId != nil && len(*s.VpcEndpointId) < 22 { + invalidParams.Add(request.NewErrParamMinLen("VpcEndpointId", 22)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetVpcEndpointId sets the VpcEndpointId field's value. 
func (s *EndpointDetails) SetVpcEndpointId(v string) *EndpointDetails { s.VpcEndpointId = &v return s } +// Represents an object that contains entries and a targets for HomeDirectoryMappings. +type HomeDirectoryMapEntry struct { + _ struct{} `type:"structure"` + + // Represents an entry and a target for HomeDirectoryMappings. + // + // Entry is a required field + Entry *string `type:"string" required:"true"` + + // Represents the map target that is used in a HomeDirectorymapEntry. + // + // Target is a required field + Target *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s HomeDirectoryMapEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HomeDirectoryMapEntry) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *HomeDirectoryMapEntry) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "HomeDirectoryMapEntry"} + if s.Entry == nil { + invalidParams.Add(request.NewErrParamRequired("Entry")) + } + if s.Target == nil { + invalidParams.Add(request.NewErrParamRequired("Target")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEntry sets the Entry field's value. +func (s *HomeDirectoryMapEntry) SetEntry(v string) *HomeDirectoryMapEntry { + s.Entry = &v + return s +} + +// SetTarget sets the Target field's value. +func (s *HomeDirectoryMapEntry) SetTarget(v string) *HomeDirectoryMapEntry { + s.Target = &v + return s +} + // Returns information related to the type of user authentication that is in // use for a server's users. A server can have only one method of authentication. type IdentityProviderDetails struct { @@ -2813,7 +3053,7 @@ type IdentityProviderDetails struct { // The InvocationRole parameter provides the type of InvocationRole used to // authenticate the user account. 
- InvocationRole *string `type:"string"` + InvocationRole *string `min:"20" type:"string"` // The Url parameter provides contains the location of the service endpoint // used to authenticate users. @@ -2830,6 +3070,19 @@ func (s IdentityProviderDetails) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *IdentityProviderDetails) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IdentityProviderDetails"} + if s.InvocationRole != nil && len(*s.InvocationRole) < 20 { + invalidParams.Add(request.NewErrParamMinLen("InvocationRole", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetInvocationRole sets the InvocationRole field's value. func (s *IdentityProviderDetails) SetInvocationRole(v string) *IdentityProviderDetails { s.InvocationRole = &v @@ -2848,7 +3101,7 @@ type ImportSshPublicKeyInput struct { // A system-assigned unique identifier for an SFTP server. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` // The public key portion of an SSH key pair. // @@ -2858,7 +3111,7 @@ type ImportSshPublicKeyInput struct { // The name of the user account that is assigned to one or more servers. 
// // UserName is a required field - UserName *string `type:"string" required:"true"` + UserName *string `min:"3" type:"string" required:"true"` } // String returns the string representation @@ -2877,12 +3130,18 @@ func (s *ImportSshPublicKeyInput) Validate() error { if s.ServerId == nil { invalidParams.Add(request.NewErrParamRequired("ServerId")) } + if s.ServerId != nil && len(*s.ServerId) < 19 { + invalidParams.Add(request.NewErrParamMinLen("ServerId", 19)) + } if s.SshPublicKeyBody == nil { invalidParams.Add(request.NewErrParamRequired("SshPublicKeyBody")) } if s.UserName == nil { invalidParams.Add(request.NewErrParamRequired("UserName")) } + if s.UserName != nil && len(*s.UserName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 3)) + } if invalidParams.Len() > 0 { return invalidParams @@ -2917,18 +3176,18 @@ type ImportSshPublicKeyOutput struct { // A system-assigned unique identifier for an SFTP server. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` // This identifier is the name given to a public key by the system that was // imported. // // SshPublicKeyId is a required field - SshPublicKeyId *string `type:"string" required:"true"` + SshPublicKeyId *string `min:"21" type:"string" required:"true"` // A user name assigned to the ServerID value that you specified. // // UserName is a required field - UserName *string `type:"string" required:"true"` + UserName *string `min:"3" type:"string" required:"true"` } // String returns the string representation @@ -3175,7 +3434,7 @@ type ListUsersInput struct { // server that has users assigned to it. 
// // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` } // String returns the string representation @@ -3200,6 +3459,9 @@ func (s *ListUsersInput) Validate() error { if s.ServerId == nil { invalidParams.Add(request.NewErrParamRequired("ServerId")) } + if s.ServerId != nil && len(*s.ServerId) < 19 { + invalidParams.Add(request.NewErrParamMinLen("ServerId", 19)) + } if invalidParams.Len() > 0 { return invalidParams @@ -3237,7 +3499,7 @@ type ListUsersOutput struct { // assigned to. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` // Returns the user accounts and their properties for the ServerId value that // you specify. @@ -3296,11 +3558,11 @@ type ListedServer struct { // The AWS Identity and Access Management entity that allows the server to turn // on Amazon CloudWatch logging. - LoggingRole *string `type:"string"` + LoggingRole *string `min:"20" type:"string"` // This value is the unique system assigned identifier for the SFTP servers // that were listed. - ServerId *string `type:"string"` + ServerId *string `min:"19" type:"string"` // This property describes the condition of the SFTP server for the server that // was described. A value of ONLINE> indicates that the server can accept jobs @@ -3383,18 +3645,25 @@ type ListedUser struct { // an Amazon S3 bucket for the user you specify by their ARN. HomeDirectory *string `type:"string"` + // The type of landing directory (folder) you mapped for your users' home directory. + // If you set it to PATH, the user will see the absolute Amazon S3 bucket paths + // as is in their SFTP clients. If you set it LOGICAL, you will need to provide + // mappings in the HomeDirectoryMappings for how you want to make S3 paths visible + // to your user. 
+ HomeDirectoryType *string `type:"string" enum:"HomeDirectoryType"` + // The role in use by this user. A role is an AWS Identity and Access Management // (IAM) entity that, in this case, allows the SFTP server to act on a user's // behalf. It allows the server to inherit the trust relationship that enables // that user to perform file operations to their Amazon S3 bucket. - Role *string `type:"string"` + Role *string `min:"20" type:"string"` // This value is the number of SSH public keys stored for the user you specified. SshPublicKeyCount *int64 `type:"integer"` // The name of the user whose ARN was specified. User names are used for authentication // purposes. - UserName *string `type:"string"` + UserName *string `min:"3" type:"string"` } // String returns the string representation @@ -3419,6 +3688,12 @@ func (s *ListedUser) SetHomeDirectory(v string) *ListedUser { return s } +// SetHomeDirectoryType sets the HomeDirectoryType field's value. +func (s *ListedUser) SetHomeDirectoryType(v string) *ListedUser { + s.HomeDirectoryType = &v + return s +} + // SetRole sets the Role field's value. func (s *ListedUser) SetRole(v string) *ListedUser { s.Role = &v @@ -3458,7 +3733,7 @@ type SshPublicKey struct { // The SshPublicKeyId parameter contains the identifier of the public key. // // SshPublicKeyId is a required field - SshPublicKeyId *string `type:"string" required:"true"` + SshPublicKeyId *string `min:"21" type:"string" required:"true"` } // String returns the string representation @@ -3495,7 +3770,7 @@ type StartServerInput struct { // A system-assigned unique identifier for an SFTP server that you start. 
// // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` } // String returns the string representation @@ -3514,6 +3789,9 @@ func (s *StartServerInput) Validate() error { if s.ServerId == nil { invalidParams.Add(request.NewErrParamRequired("ServerId")) } + if s.ServerId != nil && len(*s.ServerId) < 19 { + invalidParams.Add(request.NewErrParamMinLen("ServerId", 19)) + } if invalidParams.Len() > 0 { return invalidParams @@ -3547,7 +3825,7 @@ type StopServerInput struct { // A system-assigned unique identifier for an SFTP server that you stopped. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` } // String returns the string representation @@ -3566,6 +3844,9 @@ func (s *StopServerInput) Validate() error { if s.ServerId == nil { invalidParams.Add(request.NewErrParamRequired("ServerId")) } + if s.ServerId != nil && len(*s.ServerId) < 19 { + invalidParams.Add(request.NewErrParamMinLen("ServerId", 19)) + } if invalidParams.Len() > 0 { return invalidParams @@ -3744,12 +4025,12 @@ type TestIdentityProviderInput struct { // method is tested with a user name and password. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` // This request parameter is the name of the user account to be tested. // // UserName is a required field - UserName *string `type:"string" required:"true"` + UserName *string `min:"3" type:"string" required:"true"` // The password of the user account to be tested. 
UserPassword *string `type:"string" sensitive:"true"` @@ -3771,9 +4052,15 @@ func (s *TestIdentityProviderInput) Validate() error { if s.ServerId == nil { invalidParams.Add(request.NewErrParamRequired("ServerId")) } + if s.ServerId != nil && len(*s.ServerId) < 19 { + invalidParams.Add(request.NewErrParamMinLen("ServerId", 19)) + } if s.UserName == nil { invalidParams.Add(request.NewErrParamRequired("UserName")) } + if s.UserName != nil && len(*s.UserName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 3)) + } if invalidParams.Len() > 0 { return invalidParams @@ -3966,7 +4253,7 @@ type UpdateServerInput struct { // user account is assigned to. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` } // String returns the string representation @@ -3985,6 +4272,19 @@ func (s *UpdateServerInput) Validate() error { if s.ServerId == nil { invalidParams.Add(request.NewErrParamRequired("ServerId")) } + if s.ServerId != nil && len(*s.ServerId) < 19 { + invalidParams.Add(request.NewErrParamMinLen("ServerId", 19)) + } + if s.EndpointDetails != nil { + if err := s.EndpointDetails.Validate(); err != nil { + invalidParams.AddNested("EndpointDetails", err.(request.ErrInvalidParams)) + } + } + if s.IdentityProviderDetails != nil { + if err := s.IdentityProviderDetails.Validate(); err != nil { + invalidParams.AddNested("IdentityProviderDetails", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -4035,7 +4335,7 @@ type UpdateServerOutput struct { // is assigned to. 
// // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` } // String returns the string representation @@ -4058,9 +4358,33 @@ type UpdateUserInput struct { _ struct{} `type:"structure"` // A parameter that specifies the landing directory (folder) for a user when - // they log in to the server using their client. An example is /home/username . + // they log in to the server using their client. + // + // An example is /home/username. HomeDirectory *string `type:"string"` + // Logical directory mappings that specify what S3 paths and keys should be + // visible to your user and how you want to make them visible. You will need + // to specify the "Entry" and "Target" pair, where Entry shows how the path + // is made visible and Target is the actual S3 path. If you only specify a target, + // it will be displayed as is. You will need to also make sure that your AWS + // IAM Role provides access to paths in Target. The following is an example. + // + // '[ "/bucket2/documentation", { "Entry": "your-personal-report.pdf", "Target": + // "/bucket3/customized-reports/${transfer:UserName}.pdf" } ]' + // + // In most cases, you can use this value instead of the scope down policy to + // lock your user down to the designated home directory ("chroot"). To do this, + // you can set Entry to '/' and set Target to the HomeDirectory parameter value. + HomeDirectoryMappings []*HomeDirectoryMapEntry `min:"1" type:"list"` + + // The type of landing directory (folder) you want your users' home directory + // to be when they log into the SFTP serve. If you set it to PATH, the user + // will see the absolute Amazon S3 bucket paths as is in their SFTP clients. + // If you set it LOGICAL, you will need to provide mappings in the HomeDirectoryMappings + // for how you want to make S3 paths visible to your user. 
+ HomeDirectoryType *string `type:"string" enum:"HomeDirectoryType"` + // Allows you to supply a scope-down policy for your user so you can use the // same AWS Identity and Access Management (IAM) role across multiple users. // The policy scopes down user access to portions of your Amazon S3 bucket. @@ -4084,13 +4408,13 @@ type UpdateUserInput struct { // S3 bucket or buckets. The IAM role should also contain a trust relationship // that allows the Secure File Transfer Protocol (SFTP) server to access your // resources when servicing your SFTP user's transfer requests. - Role *string `type:"string"` + Role *string `min:"20" type:"string"` // A system-assigned unique identifier for an SFTP server instance that the // user account is assigned to. // // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` // A unique string that identifies a user and is associated with a server as // specified by the ServerId. This is the string that will be used by your user @@ -4099,7 +4423,7 @@ type UpdateUserInput struct { // A-Z, 0-9, underscore, and hyphen. The user name can't start with a hyphen. // // UserName is a required field - UserName *string `type:"string" required:"true"` + UserName *string `min:"3" type:"string" required:"true"` } // String returns the string representation @@ -4115,12 +4439,34 @@ func (s UpdateUserInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. 
func (s *UpdateUserInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "UpdateUserInput"} + if s.HomeDirectoryMappings != nil && len(s.HomeDirectoryMappings) < 1 { + invalidParams.Add(request.NewErrParamMinLen("HomeDirectoryMappings", 1)) + } + if s.Role != nil && len(*s.Role) < 20 { + invalidParams.Add(request.NewErrParamMinLen("Role", 20)) + } if s.ServerId == nil { invalidParams.Add(request.NewErrParamRequired("ServerId")) } + if s.ServerId != nil && len(*s.ServerId) < 19 { + invalidParams.Add(request.NewErrParamMinLen("ServerId", 19)) + } if s.UserName == nil { invalidParams.Add(request.NewErrParamRequired("UserName")) } + if s.UserName != nil && len(*s.UserName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 3)) + } + if s.HomeDirectoryMappings != nil { + for i, v := range s.HomeDirectoryMappings { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "HomeDirectoryMappings", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -4134,6 +4480,18 @@ func (s *UpdateUserInput) SetHomeDirectory(v string) *UpdateUserInput { return s } +// SetHomeDirectoryMappings sets the HomeDirectoryMappings field's value. +func (s *UpdateUserInput) SetHomeDirectoryMappings(v []*HomeDirectoryMapEntry) *UpdateUserInput { + s.HomeDirectoryMappings = v + return s +} + +// SetHomeDirectoryType sets the HomeDirectoryType field's value. +func (s *UpdateUserInput) SetHomeDirectoryType(v string) *UpdateUserInput { + s.HomeDirectoryType = &v + return s +} + // SetPolicy sets the Policy field's value. func (s *UpdateUserInput) SetPolicy(v string) *UpdateUserInput { s.Policy = &v @@ -4167,13 +4525,13 @@ type UpdateUserOutput struct { // user account is assigned to. 
// // ServerId is a required field - ServerId *string `type:"string" required:"true"` + ServerId *string `min:"19" type:"string" required:"true"` // The unique identifier for a user that is assigned to the SFTP server instance // that was specified in the request. // // UserName is a required field - UserName *string `type:"string" required:"true"` + UserName *string `min:"3" type:"string" required:"true"` } // String returns the string representation @@ -4206,6 +4564,14 @@ const ( EndpointTypeVpcEndpoint = "VPC_ENDPOINT" ) +const ( + // HomeDirectoryTypePath is a HomeDirectoryType enum value + HomeDirectoryTypePath = "PATH" + + // HomeDirectoryTypeLogical is a HomeDirectoryType enum value + HomeDirectoryTypeLogical = "LOGICAL" +) + // Returns information related to the type of user authentication that is in // use for a server's users. For SERVICE_MANAGED authentication, the Secure // Shell (SSH) public keys are stored with a user on an SFTP server instance. diff --git a/vendor/github.com/aws/aws-sdk-go/service/transfer/errors.go b/vendor/github.com/aws/aws-sdk-go/service/transfer/errors.go index 0734c873b55..60b6a6269fb 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/transfer/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/transfer/errors.go @@ -41,4 +41,12 @@ const ( // // The request has failed because the AWS Transfer for SFTP service is not available. ErrCodeServiceUnavailableException = "ServiceUnavailableException" + + // ErrCodeThrottlingException for service response error code + // "ThrottlingException". + // + // The request was denied due to request throttling. 
+ // + // HTTP Status Code: 400 + ErrCodeThrottlingException = "ThrottlingException" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/transfer/service.go b/vendor/github.com/aws/aws-sdk-go/service/transfer/service.go index 0fcea8665a0..90791826dac 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/transfer/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/transfer/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *Transfer { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "transfer" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *Transfer { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *Transfer { svc := &Transfer{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-11-05", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/waf/service.go b/vendor/github.com/aws/aws-sdk-go/service/waf/service.go index 09bf43d9eeb..81b9b1c93d0 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/waf/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/waf/service.go @@ -46,11 +46,11 @@ const ( // svc := waf.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *WAF { c := p.ClientConfig(EndpointsID, cfgs...) 
- return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. -func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *WAF { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *WAF { svc := &WAF{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-08-24", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/wafregional/service.go b/vendor/github.com/aws/aws-sdk-go/service/wafregional/service.go index 3a267ae6360..1eeabd40f0f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/wafregional/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/wafregional/service.go @@ -46,11 +46,11 @@ const ( // svc := wafregional.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *WAFRegional { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *WAFRegional { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *WAFRegional { svc := &WAFRegional{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2016-11-28", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/worklink/service.go b/vendor/github.com/aws/aws-sdk-go/service/worklink/service.go index c8a7fc0067e..5fae1688d08 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/worklink/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/worklink/service.go @@ -49,11 +49,11 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *WorkLink { if c.SigningNameDerived || len(c.SigningName) == 0 { c.SigningName = "worklink" } - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *WorkLink { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *WorkLink { svc := &WorkLink{ Client: client.New( cfg, @@ -62,6 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2018-09-25", }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/workspaces/api.go b/vendor/github.com/aws/aws-sdk-go/service/workspaces/api.go index d48d30c8b8d..a785c950d76 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/workspaces/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/workspaces/api.go @@ -3907,7 +3907,16 @@ type DefaultWorkspaceCreationProperties struct { // The organizational unit (OU) in the directory for the WorkSpace machine accounts. DefaultOu *string `type:"string"` - // The public IP address to attach to all WorkSpaces that are created or rebuilt. + // Specifies whether to automatically assign a public IP address to WorkSpaces + // in this directory by default. If enabled, the public IP address allows outbound + // internet access from your WorkSpaces when you’re using an internet gateway + // in the Amazon VPC in which your WorkSpaces are located. If you're using a + // Network Address Translation (NAT) gateway for outbound internet access from + // your VPC, or if your WorkSpaces are in public subnets and you manually assign + // them Elastic IP addresses, you should disable this setting. This setting + // applies to new WorkSpaces that you launch or to existing WorkSpaces that + // you rebuild. For more information, see Configure a VPC for Amazon WorkSpaces + // (https://docs.aws.amazon.com/workspaces/latest/adminguide/amazon-workspaces-vpc.html). 
EnableInternetAccess *bool `type:"boolean"` // Specifies whether the directory is enabled for Amazon WorkDocs. @@ -4791,7 +4800,7 @@ type DescribeWorkspaceSnapshotsOutput struct { _ struct{} `type:"structure"` // Information about the snapshots that can be used to rebuild a WorkSpace. - // These snapshots include the root volume. + // These snapshots include the user volume. RebuildSnapshots []*Snapshot `type:"list"` // Information about the snapshots that can be used to restore a WorkSpace. @@ -7152,7 +7161,7 @@ type WorkspaceProperties struct { RunningMode *string `type:"string" enum:"RunningMode"` // The time after a user logs off when WorkSpaces are automatically stopped. - // Configured in 60 minute intervals. + // Configured in 60-minute intervals. RunningModeAutoStopTimeoutInMinutes *int64 `type:"integer"` // The size of the user storage. diff --git a/vendor/github.com/aws/aws-sdk-go/service/workspaces/service.go b/vendor/github.com/aws/aws-sdk-go/service/workspaces/service.go index 38e1cc2ee1f..63ae2bf7457 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/workspaces/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/workspaces/service.go @@ -46,11 +46,11 @@ const ( // svc := workspaces.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *WorkSpaces { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *WorkSpaces { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *WorkSpaces { svc := &WorkSpaces{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2015-04-08", JSONVersion: "1.1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/xray/service.go b/vendor/github.com/aws/aws-sdk-go/service/xray/service.go index fdc5ea32958..9a34ccedc9b 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/xray/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/xray/service.go @@ -46,11 +46,11 @@ const ( // svc := xray.New(mySession, aws.NewConfig().WithRegion("us-west-2")) func New(p client.ConfigProvider, cfgs ...*aws.Config) *XRay { c := p.ClientConfig(EndpointsID, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) } // newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *XRay { +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *XRay { svc := &XRay{ Client: client.New( cfg, @@ -59,6 +59,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, + PartitionID: partitionID, Endpoint: endpoint, APIVersion: "2016-04-12", }, diff --git a/vendor/github.com/bombsimon/wsl/wsl.go b/vendor/github.com/bombsimon/wsl/wsl.go index 3dcc32b196d..fe62b62c602 100644 --- a/vendor/github.com/bombsimon/wsl/wsl.go +++ b/vendor/github.com/bombsimon/wsl/wsl.go @@ -7,6 +7,7 @@ import ( "go/token" "io/ioutil" "reflect" + "strings" ) type Configuration struct { @@ -49,6 +50,29 @@ type Configuration struct { // } AllowMultiLineAssignCuddle bool + // AllowCaseTrailingWhitespace will allow case blocks to end with a + // whitespace. Sometimes this might actually improve readability. This + // defaults to false but setting it to true will enable the following + // example: + // switch { + // case 1: + // fmt.Println(1) + // + // case 2: + // fmt.Println(2) + // + // case 3: + // fmt:println(3) + // } + AllowCaseTrailingWhitespace bool + + // AllowCuddleDeclaration will allow multiple var/declaration statements to + // be cuddled. This defaults to false but setting it to true will enable the + // following example: + // var foo bool + // var err error + AllowCuddleDeclaration bool + // AllowCuddleWithCalls is a list of call idents that everything can be // cuddled with. 
Defaults to calls looking like locks to support a flow like // this: @@ -69,11 +93,12 @@ type Configuration struct { // DefaultConfig returns default configuration func DefaultConfig() Configuration { return Configuration{ - StrictAppend: true, - AllowAssignAndCallCuddle: true, - AllowMultiLineAssignCuddle: true, - AllowCuddleWithCalls: []string{"Lock", "RLock"}, - AllowCuddleWithRHS: []string{"Unlock", "RUnlock"}, + StrictAppend: true, + AllowAssignAndCallCuddle: true, + AllowMultiLineAssignCuddle: true, + AllowCaseTrailingWhitespace: false, + AllowCuddleWithCalls: []string{"Lock", "RLock"}, + AllowCuddleWithRHS: []string{"Unlock", "RUnlock"}, } } @@ -113,6 +138,7 @@ func NewProcessor() *Processor { // ProcessFiles takes a string slice with file names (full paths) and lints // them. +// nolint: gocritic func (p *Processor) ProcessFiles(filenames []string) ([]Result, []string) { for _, filename := range filenames { data, err := ioutil.ReadFile(filename) @@ -147,7 +173,7 @@ func (p *Processor) process(filename string, data []byte) { for _, d := range p.file.Decls { switch v := d.(type) { case *ast.FuncDecl: - p.parseBlockBody(v.Body) + p.parseBlockBody(v.Name, v.Body) case *ast.GenDecl: // `go fmt` will handle proper spacing for GenDecl such as imports, // constants etc. @@ -159,14 +185,14 @@ func (p *Processor) process(filename string, data []byte) { // parseBlockBody will parse any kind of block statements such as switch cases // and if statements. A list of Result is returned. -func (p *Processor) parseBlockBody(block *ast.BlockStmt) { +func (p *Processor) parseBlockBody(ident *ast.Ident, block *ast.BlockStmt) { // Nothing to do if there's no value. if reflect.ValueOf(block).IsNil() { return } // Start by finding leading and trailing whitespaces. - p.findLeadingAndTrailingWhitespaces(block, nil) + p.findLeadingAndTrailingWhitespaces(ident, block, nil) // Parse the block body contents. 
p.parseBlockStatements(block.List) @@ -182,7 +208,7 @@ func (p *Processor) parseBlockStatements(statements []ast.Stmt) { if as, isAssignStmt := stmt.(*ast.AssignStmt); isAssignStmt { for _, rhs := range as.Rhs { if fl, isFuncLit := rhs.(*ast.FuncLit); isFuncLit { - p.parseBlockBody(fl.Body) + p.parseBlockBody(nil, fl.Body) } } } @@ -244,14 +270,6 @@ func (p *Processor) parseBlockStatements(statements []ast.Stmt) { calledOrAssignedOnLineAbove = append(calledOnLineAbove, assignedOnLineAbove...) ) - /* - DEBUG: - fmt.Println("LHS: ", leftHandSide) - fmt.Println("RHS: ", rightHandSide) - fmt.Println("Assigned above: ", assignedOnLineAbove) - fmt.Println("Assigned first: ", assignedFirstInBlock) - */ - // If we called some kind of lock on the line above we allow cuddling // anything. if atLeastOneInListsMatch(calledOnLineAbove, p.config.AllowCuddleWithCalls) { @@ -282,6 +300,7 @@ func (p *Processor) parseBlockStatements(statements []ast.Stmt) { // t.X = true // return t // } + // nolint: gocritic if i == len(statements)-1 && i == 1 { if p.nodeEnd(stmt)-p.nodeStart(previousStatement) <= 2 { return true @@ -351,7 +370,9 @@ func (p *Processor) parseBlockStatements(statements []ast.Stmt) { p.addError(t.Pos(), "assignments should only be cuddled with other assignments") case *ast.DeclStmt: - p.addError(t.Pos(), "declarations should never be cuddled") + if !p.config.AllowCuddleDeclaration { + p.addError(t.Pos(), "declarations should never be cuddled") + } case *ast.ExprStmt: switch previousStatement.(type) { case *ast.DeclStmt, *ast.ReturnStmt: @@ -394,6 +415,25 @@ func (p *Processor) parseBlockStatements(statements []ast.Stmt) { continue } + // Special treatment of deferring body closes after error checking + // according to best practices. See + // https://github.com/bombsimon/wsl/issues/31 which links to + // discussion about error handling after HTTP requests. This is hard + // coded and very specific but for now this is to be seen as a + // special case. 
What this does is that it *only* allows a defer + // statement with `Close` on the right hand side to be cuddled with + // an if-statement to support this: + // resp, err := client.Do(req) + // if err != nil { + // return err + // } + // defer resp.Body.Close() + if _, ok := previousStatement.(*ast.IfStmt); ok { + if atLeastOneInListsMatch(rightHandSide, []string{"Close"}) { + continue + } + } + if moreThanOneStatementAbove() { p.addError(t.Pos(), "only one cuddle assignment allowed before defer statement") @@ -517,7 +557,7 @@ func (p *Processor) firstBodyStatement(i int, allStmt []ast.Stmt) ast.Node { } } - p.parseBlockBody(statementBodyContent) + p.parseBlockBody(nil, statementBodyContent) case []ast.Stmt: // The Body field for an *ast.CaseClause or *ast.CommClause is of type // []ast.Stmt. We must check leading and trailing whitespaces and then @@ -530,7 +570,7 @@ func (p *Processor) firstBodyStatement(i int, allStmt []ast.Stmt) ast.Node { nextStatement = allStmt[i+1] } - p.findLeadingAndTrailingWhitespaces(stmt, nextStatement) + p.findLeadingAndTrailingWhitespaces(nil, stmt, nextStatement) p.parseBlockStatements(statementBodyContent) default: p.addWarning( @@ -664,6 +704,13 @@ func (p *Processor) findRHS(node ast.Node) []string { return p.findRHS(t.Call) case *ast.SendStmt: return p.findLHS(t.Value) + case *ast.IndexExpr: + rhs = append(rhs, p.findRHS(t.Index)...) + rhs = append(rhs, p.findRHS(t.X)...) + case *ast.SliceExpr: + rhs = append(rhs, p.findRHS(t.X)...) + rhs = append(rhs, p.findRHS(t.Low)...) + rhs = append(rhs, p.findRHS(t.High)...) default: if x, ok := maybeX(t); ok { return p.findRHS(x) @@ -679,7 +726,7 @@ func (p *Processor) findRHS(node ast.Node) []string { // if it exists. If the node doesn't have an X field nil and false is returned. // Known fields with X that are handled: // IndexExpr, ExprStmt, SelectorExpr, StarExpr, ParentExpr, TypeAssertExpr, -// RangeStmt, UnaryExpr, ParenExpr, SLiceExpr, IncDecStmt. 
+// RangeStmt, UnaryExpr, ParenExpr, SliceExpr, IncDecStmt. func maybeX(node interface{}) (ast.Node, bool) { maybeHasX := reflect.Indirect(reflect.ValueOf(node)).FieldByName("X") if !maybeHasX.IsValid() { @@ -726,7 +773,8 @@ func atLeastOneInListsMatch(listOne, listTwo []string) bool { // findLeadingAndTrailingWhitespaces will find leading and trailing whitespaces // in a node. The method takes comments in consideration which will make the // parser more gentle. -func (p *Processor) findLeadingAndTrailingWhitespaces(stmt, nextStatement ast.Node) { +// nolint: gocognit +func (p *Processor) findLeadingAndTrailingWhitespaces(ident *ast.Ident, stmt, nextStatement ast.Node) { var ( allowedLinesBeforeFirstStatement = 1 commentMap = ast.NewCommentMap(p.fileSet, stmt, p.file.Comments) @@ -811,11 +859,16 @@ func (p *Processor) findLeadingAndTrailingWhitespaces(stmt, nextStatement ast.No return } + // If we allow case to end white whitespace just return. + if p.config.AllowCaseTrailingWhitespace { + return + } + switch n := nextStatement.(type) { case *ast.CaseClause: - blockEndPos = n.Colon + blockEndPos = n.Case case *ast.CommClause: - blockEndPos = n.Colon + blockEndPos = n.Case default: // We're not at the end of the case? 
return @@ -824,7 +877,7 @@ func (p *Processor) findLeadingAndTrailingWhitespaces(stmt, nextStatement ast.No blockEndLine = p.fileSet.Position(blockEndPos).Line } - if p.nodeEnd(lastStatement) != blockEndLine-1 { + if p.nodeEnd(lastStatement) != blockEndLine-1 && !isExampleFunc(ident) { p.addError( blockEndPos, "block should not end with a whitespace (or comment)", @@ -832,6 +885,10 @@ func (p *Processor) findLeadingAndTrailingWhitespaces(stmt, nextStatement ast.No } } +func isExampleFunc(ident *ast.Ident) bool { + return ident != nil && strings.HasPrefix(ident.Name, "Example") +} + func (p *Processor) nodeStart(node ast.Node) int { return p.fileSet.Position(node.Pos()).Line } diff --git a/vendor/github.com/gofrs/flock/.gitignore b/vendor/github.com/gofrs/flock/.gitignore new file mode 100644 index 00000000000..daf913b1b34 --- /dev/null +++ b/vendor/github.com/gofrs/flock/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/gofrs/flock/.travis.yml b/vendor/github.com/gofrs/flock/.travis.yml new file mode 100644 index 00000000000..b791a74213c --- /dev/null +++ b/vendor/github.com/gofrs/flock/.travis.yml @@ -0,0 +1,10 @@ +language: go +go: + - 1.10.x + - 1.11.x +script: go test -v -check.vv -race ./... +sudo: false +notifications: + email: + on_success: never + on_failure: always diff --git a/vendor/github.com/gofrs/flock/LICENSE b/vendor/github.com/gofrs/flock/LICENSE new file mode 100644 index 00000000000..aff7d358e24 --- /dev/null +++ b/vendor/github.com/gofrs/flock/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2015, Tim Heckman +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of linode-netint nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/gofrs/flock/README.md b/vendor/github.com/gofrs/flock/README.md new file mode 100644 index 00000000000..71ce63692e8 --- /dev/null +++ b/vendor/github.com/gofrs/flock/README.md @@ -0,0 +1,41 @@ +# flock +[![TravisCI Build Status](https://img.shields.io/travis/gofrs/flock/master.svg?style=flat)](https://travis-ci.org/gofrs/flock) +[![GoDoc](https://img.shields.io/badge/godoc-flock-blue.svg?style=flat)](https://godoc.org/github.com/gofrs/flock) +[![License](https://img.shields.io/badge/license-BSD_3--Clause-brightgreen.svg?style=flat)](https://github.com/gofrs/flock/blob/master/LICENSE) +[![Go Report Card](https://goreportcard.com/badge/github.com/gofrs/flock)](https://goreportcard.com/report/github.com/gofrs/flock) + +`flock` implements a thread-safe sync.Locker interface for file locking. It also +includes a non-blocking TryLock() function to allow locking without blocking execution. + +## License +`flock` is released under the BSD 3-Clause License. See the `LICENSE` file for more details. + +## Go Compatibility +This package makes use of the `context` package that was introduced in Go 1.7. As such, this +package has an implicit dependency on Go 1.7+. + +## Installation +``` +go get -u github.com/gofrs/flock +``` + +## Usage +```Go +import "github.com/gofrs/flock" + +fileLock := flock.New("/var/lock/go-lock.lock") + +locked, err := fileLock.TryLock() + +if err != nil { + // handle locking error +} + +if locked { + // do work + fileLock.Unlock() +} +``` + +For more detailed usage information take a look at the package API docs on +[GoDoc](https://godoc.org/github.com/gofrs/flock). 
diff --git a/vendor/github.com/gofrs/flock/appveyor.yml b/vendor/github.com/gofrs/flock/appveyor.yml new file mode 100644 index 00000000000..6848e94bf88 --- /dev/null +++ b/vendor/github.com/gofrs/flock/appveyor.yml @@ -0,0 +1,25 @@ +version: '{build}' + +build: false +deploy: false + +clone_folder: 'c:\gopath\src\github.com\gofrs\flock' + +environment: + GOPATH: 'c:\gopath' + GOVERSION: '1.11' + +init: + - git config --global core.autocrlf input + +install: + - rmdir c:\go /s /q + - appveyor DownloadFile https://storage.googleapis.com/golang/go%GOVERSION%.windows-amd64.msi + - msiexec /i go%GOVERSION%.windows-amd64.msi /q + - set Path=c:\go\bin;c:\gopath\bin;%Path% + - go version + - go env + +test_script: + - go get -t ./... + - go test -race -v ./... diff --git a/vendor/github.com/gofrs/flock/flock.go b/vendor/github.com/gofrs/flock/flock.go new file mode 100644 index 00000000000..8f109b8a967 --- /dev/null +++ b/vendor/github.com/gofrs/flock/flock.go @@ -0,0 +1,127 @@ +// Copyright 2015 Tim Heckman. All rights reserved. +// Use of this source code is governed by the BSD 3-Clause +// license that can be found in the LICENSE file. + +// Package flock implements a thread-safe interface for file locking. +// It also includes a non-blocking TryLock() function to allow locking +// without blocking execution. +// +// Package flock is released under the BSD 3-Clause License. See the LICENSE file +// for more details. +// +// While using this library, remember that the locking behaviors are not +// guaranteed to be the same on each platform. For example, some UNIX-like +// operating systems will transparently convert a shared lock to an exclusive +// lock. If you Unlock() the flock from a location where you believe that you +// have the shared lock, you may accidentally drop the exclusive lock. +package flock + +import ( + "context" + "os" + "sync" + "time" +) + +// Flock is the struct type to handle file locking. 
All fields are unexported, +// with access to some of the fields provided by getter methods (Path() and Locked()). +type Flock struct { + path string + m sync.RWMutex + fh *os.File + l bool + r bool +} + +// New returns a new instance of *Flock. The only parameter +// it takes is the path to the desired lockfile. +func New(path string) *Flock { + return &Flock{path: path} +} + +// NewFlock returns a new instance of *Flock. The only parameter +// it takes is the path to the desired lockfile. +// +// Deprecated: Use New instead. +func NewFlock(path string) *Flock { + return New(path) +} + +// Close is equivalent to calling Unlock. +// +// This will release the lock and close the underlying file descriptor. +// It will not remove the file from disk, that's up to your application. +func (f *Flock) Close() error { + return f.Unlock() +} + +// Path returns the path as provided in NewFlock(). +func (f *Flock) Path() string { + return f.path +} + +// Locked returns the lock state (locked: true, unlocked: false). +// +// Warning: by the time you use the returned value, the state may have changed. +func (f *Flock) Locked() bool { + f.m.RLock() + defer f.m.RUnlock() + return f.l +} + +// RLocked returns the read lock state (locked: true, unlocked: false). +// +// Warning: by the time you use the returned value, the state may have changed. +func (f *Flock) RLocked() bool { + f.m.RLock() + defer f.m.RUnlock() + return f.r +} + +func (f *Flock) String() string { + return f.path +} + +// TryLockContext repeatedly tries to take an exclusive lock until one of the +// conditions is met: TryLock succeeds, TryLock fails with error, or Context +// Done channel is closed. 
+func (f *Flock) TryLockContext(ctx context.Context, retryDelay time.Duration) (bool, error) { + return tryCtx(ctx, f.TryLock, retryDelay) +} + +// TryRLockContext repeatedly tries to take a shared lock until one of the +// conditions is met: TryRLock succeeds, TryRLock fails with error, or Context +// Done channel is closed. +func (f *Flock) TryRLockContext(ctx context.Context, retryDelay time.Duration) (bool, error) { + return tryCtx(ctx, f.TryRLock, retryDelay) +} + +func tryCtx(ctx context.Context, fn func() (bool, error), retryDelay time.Duration) (bool, error) { + if ctx.Err() != nil { + return false, ctx.Err() + } + for { + if ok, err := fn(); ok || err != nil { + return ok, err + } + select { + case <-ctx.Done(): + return false, ctx.Err() + case <-time.After(retryDelay): + // try again + } + } +} + +func (f *Flock) setFh() error { + // open a new os.File instance + // create it if it doesn't exist, and open the file read-only. + fh, err := os.OpenFile(f.path, os.O_CREATE|os.O_RDONLY, os.FileMode(0600)) + if err != nil { + return err + } + + // set the filehandle on the struct + f.fh = fh + return nil +} diff --git a/vendor/github.com/gofrs/flock/flock_unix.go b/vendor/github.com/gofrs/flock/flock_unix.go new file mode 100644 index 00000000000..45f71a707c3 --- /dev/null +++ b/vendor/github.com/gofrs/flock/flock_unix.go @@ -0,0 +1,195 @@ +// Copyright 2015 Tim Heckman. All rights reserved. +// Use of this source code is governed by the BSD 3-Clause +// license that can be found in the LICENSE file. + +// +build !windows + +package flock + +import ( + "os" + "syscall" +) + +// Lock is a blocking call to try and take an exclusive file lock. It will wait +// until it is able to obtain the exclusive file lock. It's recommended that +// TryLock() be used over this function. This function may block the ability to +// query the current Locked() or RLocked() status due to a RW-mutex lock. 
+// +// If we are already exclusive-locked, this function short-circuits and returns +// immediately assuming it can take the mutex lock. +// +// If the *Flock has a shared lock (RLock), this may transparently replace the +// shared lock with an exclusive lock on some UNIX-like operating systems. Be +// careful when using exclusive locks in conjunction with shared locks +// (RLock()), because calling Unlock() may accidentally release the exclusive +// lock that was once a shared lock. +func (f *Flock) Lock() error { + return f.lock(&f.l, syscall.LOCK_EX) +} + +// RLock is a blocking call to try and take a shared file lock. It will wait +// until it is able to obtain the shared file lock. It's recommended that +// TryRLock() be used over this function. This function may block the ability to +// query the current Locked() or RLocked() status due to a RW-mutex lock. +// +// If we are already shared-locked, this function short-circuits and returns +// immediately assuming it can take the mutex lock. +func (f *Flock) RLock() error { + return f.lock(&f.r, syscall.LOCK_SH) +} + +func (f *Flock) lock(locked *bool, flag int) error { + f.m.Lock() + defer f.m.Unlock() + + if *locked { + return nil + } + + if f.fh == nil { + if err := f.setFh(); err != nil { + return err + } + } + + if err := syscall.Flock(int(f.fh.Fd()), flag); err != nil { + shouldRetry, reopenErr := f.reopenFDOnError(err) + if reopenErr != nil { + return reopenErr + } + + if !shouldRetry { + return err + } + + if err = syscall.Flock(int(f.fh.Fd()), flag); err != nil { + return err + } + } + + *locked = true + return nil +} + +// Unlock is a function to unlock the file. This file takes a RW-mutex lock, so +// while it is running the Locked() and RLocked() functions will be blocked. +// +// This function short-circuits if we are unlocked already. If not, it calls +// syscall.LOCK_UN on the file and closes the file descriptor. It does not +// remove the file from disk. It's up to your application to do. 
+// +// Please note, if your shared lock became an exclusive lock this may +// unintentionally drop the exclusive lock if called by the consumer that +// believes they have a shared lock. Please see Lock() for more details. +func (f *Flock) Unlock() error { + f.m.Lock() + defer f.m.Unlock() + + // if we aren't locked or if the lockfile instance is nil + // just return a nil error because we are unlocked + if (!f.l && !f.r) || f.fh == nil { + return nil + } + + // mark the file as unlocked + if err := syscall.Flock(int(f.fh.Fd()), syscall.LOCK_UN); err != nil { + return err + } + + f.fh.Close() + + f.l = false + f.r = false + f.fh = nil + + return nil +} + +// TryLock is the preferred function for taking an exclusive file lock. This +// function takes an RW-mutex lock before it tries to lock the file, so there is +// the possibility that this function may block for a short time if another +// goroutine is trying to take any action. +// +// The actual file lock is non-blocking. If we are unable to get the exclusive +// file lock, the function will return false instead of waiting for the lock. If +// we get the lock, we also set the *Flock instance as being exclusive-locked. +func (f *Flock) TryLock() (bool, error) { + return f.try(&f.l, syscall.LOCK_EX) +} + +// TryRLock is the preferred function for taking a shared file lock. This +// function takes an RW-mutex lock before it tries to lock the file, so there is +// the possibility that this function may block for a short time if another +// goroutine is trying to take any action. +// +// The actual file lock is non-blocking. If we are unable to get the shared file +// lock, the function will return false instead of waiting for the lock. If we +// get the lock, we also set the *Flock instance as being share-locked. 
+func (f *Flock) TryRLock() (bool, error) { + return f.try(&f.r, syscall.LOCK_SH) +} + +func (f *Flock) try(locked *bool, flag int) (bool, error) { + f.m.Lock() + defer f.m.Unlock() + + if *locked { + return true, nil + } + + if f.fh == nil { + if err := f.setFh(); err != nil { + return false, err + } + } + + var retried bool +retry: + err := syscall.Flock(int(f.fh.Fd()), flag|syscall.LOCK_NB) + + switch err { + case syscall.EWOULDBLOCK: + return false, nil + case nil: + *locked = true + return true, nil + } + if !retried { + if shouldRetry, reopenErr := f.reopenFDOnError(err); reopenErr != nil { + return false, reopenErr + } else if shouldRetry { + retried = true + goto retry + } + } + + return false, err +} + +// reopenFDOnError determines whether we should reopen the file handle +// in readwrite mode and try again. This comes from util-linux/sys-utils/flock.c: +// Since Linux 3.4 (commit 55725513) +// Probably NFSv4 where flock() is emulated by fcntl(). +func (f *Flock) reopenFDOnError(err error) (bool, error) { + if err != syscall.EIO && err != syscall.EBADF { + return false, nil + } + if st, err := f.fh.Stat(); err == nil { + // if the file is able to be read and written + if st.Mode()&0600 == 0600 { + f.fh.Close() + f.fh = nil + + // reopen in read-write mode and set the filehandle + fh, err := os.OpenFile(f.path, os.O_CREATE|os.O_RDWR, os.FileMode(0600)) + if err != nil { + return false, err + } + f.fh = fh + return true, nil + } + } + + return false, nil +} diff --git a/vendor/github.com/gofrs/flock/flock_winapi.go b/vendor/github.com/gofrs/flock/flock_winapi.go new file mode 100644 index 00000000000..fe405a255ae --- /dev/null +++ b/vendor/github.com/gofrs/flock/flock_winapi.go @@ -0,0 +1,76 @@ +// Copyright 2015 Tim Heckman. All rights reserved. +// Use of this source code is governed by the BSD 3-Clause +// license that can be found in the LICENSE file. 
+ +// +build windows + +package flock + +import ( + "syscall" + "unsafe" +) + +var ( + kernel32, _ = syscall.LoadLibrary("kernel32.dll") + procLockFileEx, _ = syscall.GetProcAddress(kernel32, "LockFileEx") + procUnlockFileEx, _ = syscall.GetProcAddress(kernel32, "UnlockFileEx") +) + +const ( + winLockfileFailImmediately = 0x00000001 + winLockfileExclusiveLock = 0x00000002 + winLockfileSharedLock = 0x00000000 +) + +// Use of 0x00000000 for the shared lock is a guess based on some the MS Windows +// `LockFileEX` docs, which document the `LOCKFILE_EXCLUSIVE_LOCK` flag as: +// +// > The function requests an exclusive lock. Otherwise, it requests a shared +// > lock. +// +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx + +func lockFileEx(handle syscall.Handle, flags uint32, reserved uint32, numberOfBytesToLockLow uint32, numberOfBytesToLockHigh uint32, offset *syscall.Overlapped) (bool, syscall.Errno) { + r1, _, errNo := syscall.Syscall6( + uintptr(procLockFileEx), + 6, + uintptr(handle), + uintptr(flags), + uintptr(reserved), + uintptr(numberOfBytesToLockLow), + uintptr(numberOfBytesToLockHigh), + uintptr(unsafe.Pointer(offset))) + + if r1 != 1 { + if errNo == 0 { + return false, syscall.EINVAL + } + + return false, errNo + } + + return true, 0 +} + +func unlockFileEx(handle syscall.Handle, reserved uint32, numberOfBytesToLockLow uint32, numberOfBytesToLockHigh uint32, offset *syscall.Overlapped) (bool, syscall.Errno) { + r1, _, errNo := syscall.Syscall6( + uintptr(procUnlockFileEx), + 5, + uintptr(handle), + uintptr(reserved), + uintptr(numberOfBytesToLockLow), + uintptr(numberOfBytesToLockHigh), + uintptr(unsafe.Pointer(offset)), + 0) + + if r1 != 1 { + if errNo == 0 { + return false, syscall.EINVAL + } + + return false, errNo + } + + return true, 0 +} diff --git a/vendor/github.com/gofrs/flock/flock_windows.go b/vendor/github.com/gofrs/flock/flock_windows.go new file mode 100644 index 00000000000..9f4a5f10d24 --- /dev/null 
+++ b/vendor/github.com/gofrs/flock/flock_windows.go @@ -0,0 +1,140 @@ +// Copyright 2015 Tim Heckman. All rights reserved. +// Use of this source code is governed by the BSD 3-Clause +// license that can be found in the LICENSE file. + +package flock + +import ( + "syscall" +) + +// ErrorLockViolation is the error code returned from the Windows syscall when a +// lock would block and you ask to fail immediately. +const ErrorLockViolation syscall.Errno = 0x21 // 33 + +// Lock is a blocking call to try and take an exclusive file lock. It will wait +// until it is able to obtain the exclusive file lock. It's recommended that +// TryLock() be used over this function. This function may block the ability to +// query the current Locked() or RLocked() status due to a RW-mutex lock. +// +// If we are already locked, this function short-circuits and returns +// immediately assuming it can take the mutex lock. +func (f *Flock) Lock() error { + return f.lock(&f.l, winLockfileExclusiveLock) +} + +// RLock is a blocking call to try and take a shared file lock. It will wait +// until it is able to obtain the shared file lock. It's recommended that +// TryRLock() be used over this function. This function may block the ability to +// query the current Locked() or RLocked() status due to a RW-mutex lock. +// +// If we are already locked, this function short-circuits and returns +// immediately assuming it can take the mutex lock. +func (f *Flock) RLock() error { + return f.lock(&f.r, winLockfileSharedLock) +} + +func (f *Flock) lock(locked *bool, flag uint32) error { + f.m.Lock() + defer f.m.Unlock() + + if *locked { + return nil + } + + if f.fh == nil { + if err := f.setFh(); err != nil { + return err + } + } + + if _, errNo := lockFileEx(syscall.Handle(f.fh.Fd()), flag, 0, 1, 0, &syscall.Overlapped{}); errNo > 0 { + return errNo + } + + *locked = true + return nil +} + +// Unlock is a function to unlock the file. 
This file takes a RW-mutex lock, so +// while it is running the Locked() and RLocked() functions will be blocked. +// +// This function short-circuits if we are unlocked already. If not, it calls +// UnlockFileEx() on the file and closes the file descriptor. It does not remove +// the file from disk. It's up to your application to do. +func (f *Flock) Unlock() error { + f.m.Lock() + defer f.m.Unlock() + + // if we aren't locked or if the lockfile instance is nil + // just return a nil error because we are unlocked + if (!f.l && !f.r) || f.fh == nil { + return nil + } + + // mark the file as unlocked + if _, errNo := unlockFileEx(syscall.Handle(f.fh.Fd()), 0, 1, 0, &syscall.Overlapped{}); errNo > 0 { + return errNo + } + + f.fh.Close() + + f.l = false + f.r = false + f.fh = nil + + return nil +} + +// TryLock is the preferred function for taking an exclusive file lock. This +// function does take a RW-mutex lock before it tries to lock the file, so there +// is the possibility that this function may block for a short time if another +// goroutine is trying to take any action. +// +// The actual file lock is non-blocking. If we are unable to get the exclusive +// file lock, the function will return false instead of waiting for the lock. If +// we get the lock, we also set the *Flock instance as being exclusive-locked. +func (f *Flock) TryLock() (bool, error) { + return f.try(&f.l, winLockfileExclusiveLock) +} + +// TryRLock is the preferred function for taking a shared file lock. This +// function does take a RW-mutex lock before it tries to lock the file, so there +// is the possibility that this function may block for a short time if another +// goroutine is trying to take any action. +// +// The actual file lock is non-blocking. If we are unable to get the shared file +// lock, the function will return false instead of waiting for the lock. If we +// get the lock, we also set the *Flock instance as being shared-locked. 
+func (f *Flock) TryRLock() (bool, error) { + return f.try(&f.r, winLockfileSharedLock) +} + +func (f *Flock) try(locked *bool, flag uint32) (bool, error) { + f.m.Lock() + defer f.m.Unlock() + + if *locked { + return true, nil + } + + if f.fh == nil { + if err := f.setFh(); err != nil { + return false, err + } + } + + _, errNo := lockFileEx(syscall.Handle(f.fh.Fd()), flag|winLockfileFailImmediately, 0, 1, 0, &syscall.Overlapped{}) + + if errNo > 0 { + if errNo == ErrorLockViolation || errNo == syscall.ERROR_IO_PENDING { + return false, nil + } + + return false, errNo + } + + *locked = true + + return true, nil +} diff --git a/vendor/github.com/golangci/golangci-lint/internal/cache/cache.go b/vendor/github.com/golangci/golangci-lint/internal/cache/cache.go index 35d6155acaf..9ac140c50ea 100644 --- a/vendor/github.com/golangci/golangci-lint/internal/cache/cache.go +++ b/vendor/github.com/golangci/golangci-lint/internal/cache/cache.go @@ -12,7 +12,6 @@ import ( "bytes" "crypto/sha256" "encoding/hex" - "errors" "fmt" "io" "io/ioutil" @@ -22,6 +21,8 @@ import ( "strings" "time" + "github.com/pkg/errors" + "github.com/golangci/golangci-lint/internal/renameio" ) @@ -144,28 +145,35 @@ func (c *Cache) get(id ActionID) (Entry, error) { missing := func() (Entry, error) { return Entry{}, errMissing } - f, err := os.Open(c.fileName(id, "a")) + failed := func(err error) (Entry, error) { + return Entry{}, err + } + fileName := c.fileName(id, "a") + f, err := os.Open(fileName) if err != nil { - return missing() + if os.IsNotExist(err) { + return missing() + } + return failed(err) } defer f.Close() entry := make([]byte, entrySize+1) // +1 to detect whether f is too long - if n, err := io.ReadFull(f, entry); n != entrySize || err != io.ErrUnexpectedEOF { - return missing() + if n, readErr := io.ReadFull(f, entry); n != entrySize || readErr != io.ErrUnexpectedEOF { + return failed(fmt.Errorf("read %d/%d bytes from %s with error %s", n, entrySize, fileName, readErr)) 
} if entry[0] != 'v' || entry[1] != '1' || entry[2] != ' ' || entry[3+hexSize] != ' ' || entry[3+hexSize+1+hexSize] != ' ' || entry[3+hexSize+1+hexSize+1+20] != ' ' || entry[entrySize-1] != '\n' { - return missing() + return failed(fmt.Errorf("bad data in %s", fileName)) } eid, entry := entry[3:3+hexSize], entry[3+hexSize:] eout, entry := entry[1:1+hexSize], entry[1+hexSize:] esize, entry := entry[1:1+20], entry[1+20:] - etime, entry := entry[1:1+20], entry[1+20:] + etime := entry[1 : 1+20] var buf [HashSize]byte - if _, err := hex.Decode(buf[:], eid); err != nil || buf != id { - return missing() + if _, err = hex.Decode(buf[:], eid); err != nil || buf != id { + return failed(errors.Wrapf(err, "failed to hex decode eid data in %s", fileName)) } - if _, err := hex.Decode(buf[:], eout); err != nil { - return missing() + if _, err = hex.Decode(buf[:], eout); err != nil { + return failed(errors.Wrapf(err, "failed to hex decode eout data in %s", fileName)) } i := 0 for i < len(esize) && esize[i] == ' ' { @@ -173,7 +181,7 @@ func (c *Cache) get(id ActionID) (Entry, error) { } size, err := strconv.ParseInt(string(esize[i:]), 10, 64) if err != nil || size < 0 { - return missing() + return failed(fmt.Errorf("failed to parse esize int from %s with error %s", fileName, err)) } i = 0 for i < len(etime) && etime[i] == ' ' { @@ -181,10 +189,12 @@ func (c *Cache) get(id ActionID) (Entry, error) { } tm, err := strconv.ParseInt(string(etime[i:]), 10, 64) if err != nil || tm < 0 { - return missing() + return failed(fmt.Errorf("failed to parse etime int from %s with error %s", fileName, err)) } - c.used(c.fileName(id, "a")) + if err = c.used(fileName); err != nil { + return failed(errors.Wrapf(err, "failed to mark %s as used", fileName)) + } return Entry{buf, size, time.Unix(0, tm)}, nil } @@ -196,7 +206,12 @@ func (c *Cache) GetFile(id ActionID) (file string, entry Entry, err error) { if err != nil { return "", Entry{}, err } - file = c.OutputFile(entry.OutputID) + + file, err = 
c.OutputFile(entry.OutputID) + if err != nil { + return "", Entry{}, err + } + info, err := os.Stat(file) if err != nil || info.Size() != entry.Size { return "", Entry{}, errMissing @@ -212,7 +227,16 @@ func (c *Cache) GetBytes(id ActionID) ([]byte, Entry, error) { if err != nil { return nil, entry, err } - data, _ := ioutil.ReadFile(c.OutputFile(entry.OutputID)) + outputFile, err := c.OutputFile(entry.OutputID) + if err != nil { + return nil, entry, err + } + + data, err := ioutil.ReadFile(outputFile) + if err != nil { + return nil, entry, err + } + if sha256.Sum256(data) != entry.OutputID { return nil, entry, errMissing } @@ -220,10 +244,12 @@ func (c *Cache) GetBytes(id ActionID) ([]byte, Entry, error) { } // OutputFile returns the name of the cache file storing output with the given OutputID. -func (c *Cache) OutputFile(out OutputID) string { +func (c *Cache) OutputFile(out OutputID) (string, error) { file := c.fileName(out, "d") - c.used(file) - return file + if err := c.used(file); err != nil { + return "", err + } + return file, nil } // Time constants for cache expiration. @@ -253,12 +279,21 @@ const ( // mtime is more than an hour old. This heuristic eliminates // nearly all of the mtime updates that would otherwise happen, // while still keeping the mtimes useful for cache trimming. -func (c *Cache) used(file string) { +func (c *Cache) used(file string) error { info, err := os.Stat(file) - if err == nil && c.now().Sub(info.ModTime()) < mtimeInterval { - return + if err != nil { + return errors.Wrapf(err, "failed to stat file %s", file) } - os.Chtimes(file, c.now(), c.now()) + + if c.now().Sub(info.ModTime()) < mtimeInterval { + return nil + } + + if err := os.Chtimes(file, c.now(), c.now()); err != nil { + return errors.Wrapf(err, "failed to change time of file %s", file) + } + + return nil } // Trim removes old cache entries that are likely not to be reused. 
@@ -285,7 +320,7 @@ func (c *Cache) Trim() { // Ignore errors from here: if we don't write the complete timestamp, the // cache will appear older than it is, and we'll trim it again next time. - renameio.WriteFile(filepath.Join(c.dir, "trim.txt"), []byte(fmt.Sprintf("%d", now.Unix())), 0666) + _ = renameio.WriteFile(filepath.Join(c.dir, "trim.txt"), []byte(fmt.Sprintf("%d", now.Unix())), 0666) } // trimSubdir trims a single cache subdirectory. @@ -367,7 +402,9 @@ func (c *Cache) putIndexEntry(id ActionID, out OutputID, size int64, allowVerify os.Remove(file) return err } - os.Chtimes(file, c.now(), c.now()) // mainly for tests + if err = os.Chtimes(file, c.now(), c.now()); err != nil { // mainly for tests + return errors.Wrapf(err, "failed to change time of file %s", file) + } return nil } @@ -421,9 +458,12 @@ func (c *Cache) copyFile(file io.ReadSeeker, out OutputID, size int64) error { info, err := os.Stat(name) if err == nil && info.Size() == size { // Check hash. - if f, err := os.Open(name); err == nil { + if f, openErr := os.Open(name); openErr == nil { h := sha256.New() - io.Copy(h, f) + if _, copyErr := io.Copy(h, f); copyErr != nil { + return errors.Wrap(copyErr, "failed to copy to sha256") + } + f.Close() var out2 OutputID h.Sum(out2[:0]) @@ -456,44 +496,49 @@ func (c *Cache) copyFile(file io.ReadSeeker, out OutputID, size int64) error { // before returning, to avoid leaving bad bytes in the file. // Copy file to f, but also into h to double-check hash. - if _, err := file.Seek(0, 0); err != nil { - f.Truncate(0) + if _, err = file.Seek(0, 0); err != nil { + _ = f.Truncate(0) return err } h := sha256.New() w := io.MultiWriter(f, h) - if _, err := io.CopyN(w, file, size-1); err != nil { - f.Truncate(0) + if _, err = io.CopyN(w, file, size-1); err != nil { + _ = f.Truncate(0) return err } // Check last byte before writing it; writing it will make the size match // what other processes expect to find and might cause them to start // using the file. 
buf := make([]byte, 1) - if _, err := file.Read(buf); err != nil { - f.Truncate(0) + if _, err = file.Read(buf); err != nil { + _ = f.Truncate(0) return err } - h.Write(buf) + if n, wErr := h.Write(buf); n != len(buf) { + return fmt.Errorf("wrote to hash %d/%d bytes with error %s", n, len(buf), wErr) + } + sum := h.Sum(nil) if !bytes.Equal(sum, out[:]) { - f.Truncate(0) + _ = f.Truncate(0) return fmt.Errorf("file content changed underfoot") } // Commit cache file entry. - if _, err := f.Write(buf); err != nil { - f.Truncate(0) + if _, err = f.Write(buf); err != nil { + _ = f.Truncate(0) return err } - if err := f.Close(); err != nil { + if err = f.Close(); err != nil { // Data might not have been written, // but file may look like it is the right size. // To be extra careful, remove cached file. os.Remove(name) return err } - os.Chtimes(name, c.now(), c.now()) // mainly for tests + if err = os.Chtimes(name, c.now(), c.now()); err != nil { // mainly for tests + return errors.Wrapf(err, "failed to change time of file %s", name) + } return nil } diff --git a/vendor/github.com/golangci/golangci-lint/internal/cache/default.go b/vendor/github.com/golangci/golangci-lint/internal/cache/default.go index 162bcc972a8..e8866cb30cc 100644 --- a/vendor/github.com/golangci/golangci-lint/internal/cache/default.go +++ b/vendor/github.com/golangci/golangci-lint/internal/cache/default.go @@ -39,7 +39,9 @@ func initDefaultCache() { } if _, err := os.Stat(filepath.Join(dir, "README")); err != nil { // Best effort. 
- ioutil.WriteFile(filepath.Join(dir, "README"), []byte(cacheREADME), 0666) + if wErr := ioutil.WriteFile(filepath.Join(dir, "README"), []byte(cacheREADME), 0666); wErr != nil { + log.Fatalf("Failed to write README file to cache dir %s: %s", dir, err) + } } c, err := Open(dir) diff --git a/vendor/github.com/golangci/golangci-lint/internal/cache/hash.go b/vendor/github.com/golangci/golangci-lint/internal/cache/hash.go index a42f149c712..4ce79e325b2 100644 --- a/vendor/github.com/golangci/golangci-lint/internal/cache/hash.go +++ b/vendor/github.com/golangci/golangci-lint/internal/cache/hash.go @@ -42,11 +42,19 @@ func SetSalt(b []byte) { // Subkey returns an action ID corresponding to mixing a parent // action ID with a string description of the subkey. -func Subkey(parent ActionID, desc string) ActionID { +func Subkey(parent ActionID, desc string) (ActionID, error) { h := sha256.New() - h.Write([]byte("subkey:")) - h.Write(parent[:]) - h.Write([]byte(desc)) + const subkeyPrefix = "subkey:" + if n, err := h.Write([]byte(subkeyPrefix)); n != len(subkeyPrefix) { + return ActionID{}, fmt.Errorf("wrote %d/%d bytes of subkey prefix with error %s", n, len(subkeyPrefix), err) + } + if n, err := h.Write(parent[:]); n != len(parent) { + return ActionID{}, fmt.Errorf("wrote %d/%d bytes of parent with error %s", n, len(parent), err) + } + if n, err := h.Write([]byte(desc)); n != len(desc) { + return ActionID{}, fmt.Errorf("wrote %d/%d bytes of desc with error %s", n, len(desc), err) + } + var out ActionID h.Sum(out[:0]) if debugHash { @@ -57,21 +65,23 @@ func Subkey(parent ActionID, desc string) ActionID { hashDebug.m[out] = fmt.Sprintf("subkey %x %q", parent, desc) hashDebug.Unlock() } - return out + return out, nil } // NewHash returns a new Hash. // The caller is expected to Write data to it and then call Sum. 
-func NewHash(name string) *Hash { +func NewHash(name string) (*Hash, error) { h := &Hash{h: sha256.New(), name: name} if debugHash { fmt.Fprintf(os.Stderr, "HASH[%s]\n", h.name) } - h.Write(hashSalt) + if n, err := h.Write(hashSalt); n != len(hashSalt) { + return nil, fmt.Errorf("wrote %d/%d bytes of hash salt with error %s", n, len(hashSalt), err) + } if verify { h.buf = new(bytes.Buffer) } - return h + return h, nil } // Write writes data to the running hash. diff --git a/vendor/github.com/golangci/golangci-lint/internal/pkgcache/pkgcache.go b/vendor/github.com/golangci/golangci-lint/internal/pkgcache/pkgcache.go index 79da2808a2d..86007d0427d 100644 --- a/vendor/github.com/golangci/golangci-lint/internal/pkgcache/pkgcache.go +++ b/vendor/github.com/golangci/golangci-lint/internal/pkgcache/pkgcache.go @@ -17,6 +17,14 @@ import ( "github.com/golangci/golangci-lint/pkg/timeutils" ) +type HashMode int + +const ( + HashModeNeedOnlySelf HashMode = iota + HashModeNeedDirectDeps + HashModeNeedAllDeps +) + // Cache is a per-package data cache. A cached data is invalidated when // package or it's dependencies change. 
type Cache struct { @@ -46,7 +54,7 @@ func (c *Cache) Trim() { }) } -func (c *Cache) Put(pkg *packages.Package, key string, data interface{}) error { +func (c *Cache) Put(pkg *packages.Package, mode HashMode, key string, data interface{}) error { var err error buf := &bytes.Buffer{} c.sw.TrackStage("gob", func() { @@ -59,9 +67,13 @@ func (c *Cache) Put(pkg *packages.Package, key string, data interface{}) error { var aID cache.ActionID c.sw.TrackStage("key build", func() { - aID, err = c.pkgActionID(pkg) + aID, err = c.pkgActionID(pkg, mode) if err == nil { - aID = cache.Subkey(aID, key) + subkey, subkeyErr := cache.Subkey(aID, key) + if subkeyErr != nil { + err = errors.Wrap(subkeyErr, "failed to build subkey") + } + aID = subkey } }) if err != nil { @@ -81,13 +93,17 @@ func (c *Cache) Put(pkg *packages.Package, key string, data interface{}) error { var ErrMissing = errors.New("missing data") -func (c *Cache) Get(pkg *packages.Package, key string, data interface{}) error { +func (c *Cache) Get(pkg *packages.Package, mode HashMode, key string, data interface{}) error { var aID cache.ActionID var err error c.sw.TrackStage("key build", func() { - aID, err = c.pkgActionID(pkg) + aID, err = c.pkgActionID(pkg, mode) if err == nil { - aID = cache.Subkey(aID, key) + subkey, subkeyErr := cache.Subkey(aID, key) + if subkeyErr != nil { + err = errors.Wrap(subkeyErr, "failed to build subkey") + } + aID = subkey } }) if err != nil { @@ -117,13 +133,16 @@ func (c *Cache) Get(pkg *packages.Package, key string, data interface{}) error { return nil } -func (c *Cache) pkgActionID(pkg *packages.Package) (cache.ActionID, error) { - hash, err := c.packageHash(pkg) +func (c *Cache) pkgActionID(pkg *packages.Package, mode HashMode) (cache.ActionID, error) { + hash, err := c.packageHash(pkg, mode) if err != nil { return cache.ActionID{}, errors.Wrap(err, "failed to get package hash") } - key := cache.NewHash("action ID") + key, err := cache.NewHash("action ID") + if err != nil { + return 
cache.ActionID{}, errors.Wrap(err, "failed to make a hash") + } fmt.Fprintf(key, "pkgpath %s\n", pkg.PkgPath) fmt.Fprintf(key, "pkghash %s\n", hash) @@ -133,23 +152,36 @@ func (c *Cache) pkgActionID(pkg *packages.Package) (cache.ActionID, error) { // packageHash computes a package's hash. The hash is based on all Go // files that make up the package, as well as the hashes of imported // packages. -func (c *Cache) packageHash(pkg *packages.Package) (string, error) { - cachedHash, ok := c.pkgHashes.Load(pkg) +func (c *Cache) packageHash(pkg *packages.Package, mode HashMode) (string, error) { + type hashResults map[HashMode]string + hashResI, ok := c.pkgHashes.Load(pkg) if ok { - return cachedHash.(string), nil + hashRes := hashResI.(hashResults) + if _, ok := hashRes[mode]; !ok { + return "", fmt.Errorf("no mode %d in hash result", mode) + } + return hashRes[mode], nil + } + + hashRes := hashResults{} + + key, err := cache.NewHash("package hash") + if err != nil { + return "", errors.Wrap(err, "failed to make a hash") } - key := cache.NewHash("package hash") fmt.Fprintf(key, "pkgpath %s\n", pkg.PkgPath) for _, f := range pkg.CompiledGoFiles { c.ioSem <- struct{}{} - h, err := cache.FileHash(f) + h, fErr := cache.FileHash(f) <-c.ioSem - if err != nil { - return "", errors.Wrapf(err, "failed to calculate file %s hash", f) + if fErr != nil { + return "", errors.Wrapf(fErr, "failed to calculate file %s hash", f) } fmt.Fprintf(key, "file %s %x\n", f, h) } + curSum := key.Sum() + hashRes[HashModeNeedOnlySelf] = hex.EncodeToString(curSum[:]) imps := make([]*packages.Package, 0, len(pkg.Imports)) for _, imp := range pkg.Imports { @@ -158,20 +190,40 @@ func (c *Cache) packageHash(pkg *packages.Package) (string, error) { sort.Slice(imps, func(i, j int) bool { return imps[i].PkgPath < imps[j].PkgPath }) - for _, dep := range imps { - if dep.PkgPath == "unsafe" { - continue - } - depHash, err := c.packageHash(dep) - if err != nil { - return "", errors.Wrapf(err, "failed to 
calculate hash for dependency %s", dep.Name) + calcDepsHash := func(depMode HashMode) error { + for _, dep := range imps { + if dep.PkgPath == "unsafe" { + continue + } + + depHash, depErr := c.packageHash(dep, depMode) + if depErr != nil { + return errors.Wrapf(depErr, "failed to calculate hash for dependency %s with mode %d", dep.Name, depMode) + } + + fmt.Fprintf(key, "import %s %s\n", dep.PkgPath, depHash) } + return nil + } + + if err := calcDepsHash(HashModeNeedOnlySelf); err != nil { + return "", err + } + + curSum = key.Sum() + hashRes[HashModeNeedDirectDeps] = hex.EncodeToString(curSum[:]) - fmt.Fprintf(key, "import %s %s\n", dep.PkgPath, depHash) + if err := calcDepsHash(HashModeNeedAllDeps); err != nil { + return "", err } - h := key.Sum() - ret := hex.EncodeToString(h[:]) - c.pkgHashes.Store(pkg, ret) - return ret, nil + curSum = key.Sum() + hashRes[HashModeNeedAllDeps] = hex.EncodeToString(curSum[:]) + + if _, ok := hashRes[mode]; !ok { + return "", fmt.Errorf("invalid mode %d", mode) + } + + c.pkgHashes.Store(pkg, hashRes) + return hashRes[mode], nil } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/cache.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/cache.go new file mode 100644 index 00000000000..7fa04e85ed9 --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/cache.go @@ -0,0 +1,86 @@ +package commands + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/golangci/golangci-lint/pkg/fsutils" + + "github.com/golangci/golangci-lint/internal/cache" + + "github.com/spf13/cobra" + + "github.com/golangci/golangci-lint/pkg/logutils" +) + +func (e *Executor) initCache() { + cacheCmd := &cobra.Command{ + Use: "cache", + Short: "Cache control and information", + Run: func(cmd *cobra.Command, args []string) { + if len(args) != 0 { + e.log.Fatalf("Usage: golangci-lint cache") + } + if err := cmd.Help(); err != nil { + e.log.Fatalf("Can't run cache: %s", err) + } + }, + } + 
e.rootCmd.AddCommand(cacheCmd) + + cacheCmd.AddCommand(&cobra.Command{ + Use: "clean", + Short: "Clean cache", + Run: e.executeCleanCache, + }) + cacheCmd.AddCommand(&cobra.Command{ + Use: "status", + Short: "Show cache status", + Run: e.executeCacheStatus, + }) + + // TODO: add trim command? +} + +func (e *Executor) executeCleanCache(_ *cobra.Command, args []string) { + if len(args) != 0 { + e.log.Fatalf("Usage: golangci-lint cache clean") + } + + cacheDir := cache.DefaultDir() + if err := os.RemoveAll(cacheDir); err != nil { + e.log.Fatalf("Failed to remove dir %s: %s", cacheDir, err) + } + + os.Exit(0) +} + +func (e *Executor) executeCacheStatus(_ *cobra.Command, args []string) { + if len(args) != 0 { + e.log.Fatalf("Usage: golangci-lint cache status") + } + + cacheDir := cache.DefaultDir() + fmt.Fprintf(logutils.StdOut, "Dir: %s\n", cacheDir) + cacheSizeBytes, err := dirSizeBytes(cacheDir) + if err == nil { + fmt.Fprintf(logutils.StdOut, "Size: %s\n", fsutils.PrettifyBytesCount(cacheSizeBytes)) + } + + os.Exit(0) +} + +func dirSizeBytes(path string) (int64, error) { + var size int64 + err := filepath.Walk(path, func(_ string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !info.IsDir() { + size += info.Size() + } + return err + }) + return size, err +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/executor.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/executor.go index 62429e726b5..975a3c50758 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/commands/executor.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/executor.go @@ -1,10 +1,25 @@ package commands import ( + "bytes" + "context" + "crypto/sha256" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "time" + + "github.com/golangci/golangci-lint/internal/cache" + "github.com/fatih/color" + "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/spf13/pflag" + 
"github.com/gofrs/flock" + "github.com/golangci/golangci-lint/internal/pkgcache" "github.com/golangci/golangci-lint/pkg/config" "github.com/golangci/golangci-lint/pkg/fsutils" @@ -38,6 +53,7 @@ type Executor struct { sw *timeutils.Stopwatch loadGuard *load.Guard + flock *flock.Flock } func NewExecutor(version, commit, date string) *Executor { @@ -52,6 +68,9 @@ func NewExecutor(version, commit, date string) *Executor { e.debugf("Starting execution...") e.log = report.NewLogWrapper(logutils.NewStderrLog(""), &e.reportData) + if ok := e.acquireFileLock(); !ok { + e.log.Fatalf("Parallel golangci-lint is running") + } // to setup log level early we need to parse config from command line extra time to // find `-v` option @@ -83,6 +102,7 @@ func NewExecutor(version, commit, date string) *Executor { e.initConfig() e.initCompletion() e.initVersion() + e.initCache() // init e.cfg by values from config: flags parse will see these values // like the default ones. It will overwrite them only if the same option @@ -118,6 +138,9 @@ func NewExecutor(version, commit, date string) *Executor { e.loadGuard = load.NewGuard() e.contextLoader = lint.NewContextLoader(e.cfg, e.log.Child("loader"), e.goenv, e.lineCache, e.fileCache, e.pkgCache, e.loadGuard) + if err = e.initHashSalt(version); err != nil { + e.log.Fatalf("Failed to init hash salt: %s", err) + } e.debugf("Initialized executor") return e } @@ -125,3 +148,80 @@ func NewExecutor(version, commit, date string) *Executor { func (e *Executor) Execute() error { return e.rootCmd.Execute() } + +func (e *Executor) initHashSalt(version string) error { + binSalt, err := computeBinarySalt(version) + if err != nil { + return errors.Wrap(err, "failed to calculate binary salt") + } + + configSalt, err := computeConfigSalt(e.cfg) + if err != nil { + return errors.Wrap(err, "failed to calculate config salt") + } + + var b bytes.Buffer + b.Write(binSalt) + b.Write(configSalt) + cache.SetSalt(b.Bytes()) + return nil +} + +func 
computeBinarySalt(version string) ([]byte, error) { + if version != "" && version != "(devel)" { + return []byte(version), nil + } + + if logutils.HaveDebugTag("bin_salt") { + return []byte("debug"), nil + } + + p, err := os.Executable() + if err != nil { + return nil, err + } + f, err := os.Open(p) + if err != nil { + return nil, err + } + defer f.Close() + h := sha256.New() + if _, err := io.Copy(h, f); err != nil { + return nil, err + } + return h.Sum(nil), nil +} + +func computeConfigSalt(cfg *config.Config) ([]byte, error) { + configBytes, err := json.Marshal(cfg) + if err != nil { + return nil, errors.Wrap(err, "failed to json marshal config") + } + + h := sha256.New() + if n, err := h.Write(configBytes); n != len(configBytes) { + return nil, fmt.Errorf("failed to hash config bytes: wrote %d/%d bytes, error: %s", n, len(configBytes), err) + } + return h.Sum(nil), nil +} + +func (e *Executor) acquireFileLock() bool { + lockFile := filepath.Join(os.TempDir(), "golangci-lint.lock") + e.debugf("Locking on file %s...", lockFile) + f := flock.New(lockFile) + ctx, finish := context.WithTimeout(context.Background(), time.Minute) + defer finish() + + if ok, _ := f.TryLockContext(ctx, time.Second*3); !ok { + return false + } + + e.flock = f + return true +} + +func (e *Executor) releaseFileLock() { + if err := e.flock.Unlock(); err != nil { + e.debugf("Failed to unlock on file: %s", err) + } +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/root.go b/vendor/github.com/golangci/golangci-lint/pkg/commands/root.go index 21e7fa2e973..e61ecc94bb5 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/commands/root.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/root.go @@ -73,6 +73,7 @@ func (e *Executor) persistentPostRun(_ *cobra.Command, _ []string) { trace.Stop() } + e.releaseFileLock() os.Exit(e.exitCode) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/commands/run.go 
b/vendor/github.com/golangci/golangci-lint/pkg/commands/run.go index 7e5f429d1b8..4521a2e8807 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/commands/run.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/commands/run.go @@ -54,6 +54,8 @@ func wh(text string) string { return color.GreenString(text) } +const defaultTimeout = time.Minute + //nolint:funlen func initFlagSet(fs *pflag.FlagSet, cfg *config.Config, m *lintersdb.Manager, isFinalInit bool) { hideFlag := func(name string) { @@ -82,12 +84,17 @@ func initFlagSet(fs *pflag.FlagSet, cfg *config.Config, m *lintersdb.Manager, is // Run config rc := &cfg.Run + fs.StringVar(&rc.ModulesDownloadMode, "modules-download-mode", "", + "Modules download mode. If not empty, passed as -mod= to go tools") fs.IntVar(&rc.ExitCodeIfIssuesFound, "issues-exit-code", exitcodes.IssuesFound, wh("Exit code when issues were found")) fs.StringSliceVar(&rc.BuildTags, "build-tags", nil, wh("Build tags")) - fs.DurationVar(&rc.Timeout, "deadline", time.Minute, wh("Deadline for total work")) - hideFlag("deadline") - fs.DurationVar(&rc.Timeout, "timeout", time.Minute, wh("Timeout for total work")) + + fs.DurationVar(&rc.Timeout, "deadline", defaultTimeout, wh("Deadline for total work")) + if err := fs.MarkHidden("deadline"); err != nil { + panic(err) + } + fs.DurationVar(&rc.Timeout, "timeout", defaultTimeout, wh("Timeout for total work")) fs.BoolVar(&rc.AnalyzeTests, "tests", true, wh("Analyze tests (*_test.go)")) fs.BoolVar(&rc.PrintResourcesUsage, "print-resources-usage", false, @@ -166,6 +173,11 @@ func initFlagSet(fs *pflag.FlagSet, cfg *config.Config, m *lintersdb.Manager, is fs.StringSliceVarP(&lc.Enable, "enable", "E", nil, wh("Enable specific linter")) fs.StringSliceVarP(&lc.Disable, "disable", "D", nil, wh("Disable specific linter")) fs.BoolVar(&lc.EnableAll, "enable-all", false, wh("Enable all linters")) + if err := fs.MarkHidden("enable-all"); err != nil { + panic(err) + } + // TODO: run 
hideFlag("enable-all") to print deprecation message. + fs.BoolVar(&lc.DisableAll, "disable-all", false, wh("Disable all linters")) fs.StringSliceVarP(&lc.Presets, "presets", "p", nil, wh(fmt.Sprintf("Enable presets (%s) of linters. Run 'golangci-lint linters' to see "+ @@ -390,6 +402,7 @@ func (e *Executor) executeRun(_ *cobra.Command, args []string) { } }() + e.setTimeoutToDeadlineIfOnlyDeadlineIsSet() ctx, cancel := context.WithTimeout(context.Background(), e.cfg.Run.Timeout) defer cancel() @@ -411,6 +424,15 @@ func (e *Executor) executeRun(_ *cobra.Command, args []string) { e.setupExitCode(ctx) } +// to be removed when deadline is finally decommissioned +func (e *Executor) setTimeoutToDeadlineIfOnlyDeadlineIsSet() { + //lint:ignore SA1019 We want to promoted the deprecated config value when needed + deadlineValue := e.cfg.Run.Deadline // nolint: staticcheck + if deadlineValue != 0 && e.cfg.Run.Timeout == defaultTimeout { + e.cfg.Run.Timeout = deadlineValue + } +} + func (e *Executor) setupExitCode(ctx context.Context) { if ctx.Err() != nil { e.exitCode = exitcodes.Timeout @@ -448,7 +470,6 @@ func watchResources(ctx context.Context, done chan struct{}, logger logutils.Log const MB = 1024 * 1024 track := func() { - debugf("Starting memory tracing iteration ...") var m runtime.MemStats runtime.ReadMemStats(&m) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/config/config.go b/vendor/github.com/golangci/golangci-lint/pkg/config/config.go index 90b0669683f..1b8de67585f 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/config/config.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/config/config.go @@ -255,9 +255,11 @@ type GocognitSettings struct { } type WSLSettings struct { - StrictAppend bool `mapstructure:"strict-append"` - AllowAssignAndCallCuddle bool `mapstructure:"allow-assign-and-call"` - AllowMultiLineAssignCuddle bool `mapstructure:"allow-multiline-assign"` + StrictAppend bool `mapstructure:"strict-append"` + 
AllowAssignAndCallCuddle bool `mapstructure:"allow-assign-and-call"` + AllowMultiLineAssignCuddle bool `mapstructure:"allow-multiline-assign"` + AllowCaseTrailingWhitespace bool `mapstructure:"allow-case-trailing-whitespace"` + AllowCuddleDeclaration bool `mapstructure:"allow-cuddle-declarations"` } var defaultLintersSettings = LintersSettings{ @@ -289,9 +291,11 @@ var defaultLintersSettings = LintersSettings{ MinComplexity: 30, }, WSL: WSLSettings{ - StrictAppend: true, - AllowAssignAndCallCuddle: true, - AllowMultiLineAssignCuddle: true, + StrictAppend: true, + AllowAssignAndCallCuddle: true, + AllowMultiLineAssignCuddle: true, + AllowCaseTrailingWhitespace: true, + AllowCuddleDeclaration: false, }, } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/exitcodes/exitcodes.go b/vendor/github.com/golangci/golangci-lint/pkg/exitcodes/exitcodes.go index 7c74a29973a..536f9036142 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/exitcodes/exitcodes.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/exitcodes/exitcodes.go @@ -30,3 +30,5 @@ var ( Code: Failure, } ) + +// 1 diff --git a/vendor/github.com/golangci/golangci-lint/pkg/fsutils/filecache.go b/vendor/github.com/golangci/golangci-lint/pkg/fsutils/filecache.go index b3c282429fc..2b17a039861 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/fsutils/filecache.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/fsutils/filecache.go @@ -33,7 +33,7 @@ func (fc *FileCache) GetFileBytes(filePath string) ([]byte, error) { return fileBytes, nil } -func prettifyBytesCount(n int) string { +func PrettifyBytesCount(n int64) string { const ( Multiplexer = 1024 KiB = 1 * Multiplexer @@ -54,14 +54,14 @@ func prettifyBytesCount(n int) string { } func (fc *FileCache) PrintStats(log logutils.Log) { - var size int + var size int64 var mapLen int fc.files.Range(func(_, fileBytes interface{}) bool { mapLen++ - size += len(fileBytes.([]byte)) + size += int64(len(fileBytes.([]byte))) 
return true }) - log.Infof("File cache stats: %d entries of total size %s", mapLen, prettifyBytesCount(size)) + log.Infof("File cache stats: %d entries of total size %s", mapLen, PrettifyBytesCount(size)) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/deadcode.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/deadcode.go index 269275ceb59..9889dad4ed6 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/deadcode.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/deadcode.go @@ -15,10 +15,10 @@ import ( func NewDeadcode() *goanalysis.Linter { const linterName = "deadcode" var mu sync.Mutex - var resIssues []result.Issue + var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, Run: func(pass *analysis.Pass) (interface{}, error) { prog := goanalysis.MakeFakeLoaderProgram(pass) @@ -26,13 +26,13 @@ func NewDeadcode() *goanalysis.Linter { if err != nil { return nil, err } - res := make([]result.Issue, 0, len(issues)) + res := make([]goanalysis.Issue, 0, len(issues)) for _, i := range issues { - res = append(res, result.Issue{ + res = append(res, goanalysis.NewIssue(&result.Issue{ //nolint:scopelint Pos: i.Pos, Text: fmt.Sprintf("%s is unused", formatCode(i.UnusedIdentName, nil)), FromLinter: linterName, - }) + }, pass)) } mu.Lock() resIssues = append(resIssues, res...) 
@@ -46,7 +46,7 @@ func NewDeadcode() *goanalysis.Linter { "Finds unused code", []*analysis.Analyzer{analyzer}, nil, - ).WithIssuesReporter(func(*linter.Context) []result.Issue { + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/depguard.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/depguard.go index 1b73fc076aa..611f6d4950b 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/depguard.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/depguard.go @@ -49,10 +49,10 @@ func setupDepguardPackages(dg *depguard.Depguard, lintCtx *linter.Context) { func NewDepguard() *goanalysis.Linter { const linterName = "depguard" var mu sync.Mutex - var resIssues []result.Issue + var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, } return goanalysis.NewLinter( @@ -88,17 +88,17 @@ func NewDepguard() *goanalysis.Linter { if dg.ListType == depguard.LTWhitelist { msgSuffix = "is not in the whitelist" } - res := make([]result.Issue, 0, len(issues)) + res := make([]goanalysis.Issue, 0, len(issues)) for _, i := range issues { userSuppliedMsgSuffix := dgSettings.PackagesWithErrorMessage[i.PackageName] if userSuppliedMsgSuffix != "" { userSuppliedMsgSuffix = ": " + userSuppliedMsgSuffix } - res = append(res, result.Issue{ + res = append(res, goanalysis.NewIssue(&result.Issue{ //nolint:scopelint Pos: i.Position, Text: fmt.Sprintf("%s %s%s", formatCode(i.PackageName, lintCtx.Cfg), msgSuffix, userSuppliedMsgSuffix), FromLinter: linterName, - }) + }, pass)) } mu.Lock() resIssues = append(resIssues, res...) 
@@ -106,7 +106,7 @@ func NewDepguard() *goanalysis.Linter { return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/dogsled.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/dogsled.go index 4cddf40064f..8978ff913dc 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/dogsled.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/dogsled.go @@ -17,10 +17,10 @@ const dogsledLinterName = "dogsled" func NewDogsled() *goanalysis.Linter { var mu sync.Mutex - var resIssues []result.Issue + var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: dogsledLinterName, Doc: goanalysis.TheOnlyanalyzerDoc, } return goanalysis.NewLinter( @@ -30,14 +30,16 @@ func NewDogsled() *goanalysis.Linter { nil, ).WithContextSetter(func(lintCtx *linter.Context) { analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - var pkgIssues []result.Issue + var pkgIssues []goanalysis.Issue for _, f := range pass.Files { v := returnsVisitor{ maxBlanks: lintCtx.Settings().Dogsled.MaxBlankIdentifiers, f: pass.Fset, } ast.Walk(&v, f) - pkgIssues = append(pkgIssues, v.issues...) 
+ for i := range v.issues { + pkgIssues = append(pkgIssues, goanalysis.NewIssue(&v.issues[i], pass)) + } } mu.Lock() @@ -46,7 +48,7 @@ func NewDogsled() *goanalysis.Linter { return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupl.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupl.go index deb14900680..d6dc67fbb0e 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupl.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/dupl.go @@ -19,10 +19,10 @@ const duplLinterName = "dupl" func NewDupl() *goanalysis.Linter { var mu sync.Mutex - var resIssues []result.Issue + var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: duplLinterName, Doc: goanalysis.TheOnlyanalyzerDoc, } return goanalysis.NewLinter( @@ -47,7 +47,7 @@ func NewDupl() *goanalysis.Linter { return nil, nil } - res := make([]result.Issue, 0, len(issues)) + res := make([]goanalysis.Issue, 0, len(issues)) for _, i := range issues { toFilename, err := fsutils.ShortestRelPath(i.To.Filename(), "") if err != nil { @@ -57,7 +57,7 @@ func NewDupl() *goanalysis.Linter { text := fmt.Sprintf("%d-%d lines are duplicate of %s", i.From.LineStart(), i.From.LineEnd(), formatCode(dupl, lintCtx.Cfg)) - res = append(res, result.Issue{ + res = append(res, goanalysis.NewIssue(&result.Issue{ //nolint:scopelint Pos: token.Position{ Filename: i.From.Filename(), Line: i.From.LineStart(), @@ -68,7 +68,7 @@ func NewDupl() *goanalysis.Linter { }, Text: text, FromLinter: duplLinterName, - }) + }, pass)) } mu.Lock() @@ -77,7 +77,7 @@ func NewDupl() *goanalysis.Linter { return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) 
[]goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/errcheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/errcheck.go index 0e1a94d3f0a..bf6b9a45348 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/errcheck.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/errcheck.go @@ -24,9 +24,9 @@ import ( func NewErrcheck() *goanalysis.Linter { const linterName = "errcheck" var mu sync.Mutex - var res []result.Issue + var res []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, } return goanalysis.NewLinter( @@ -51,7 +51,7 @@ func NewErrcheck() *goanalysis.Linter { return nil, nil } - issues := make([]result.Issue, 0, len(errcheckIssues)) + issues := make([]goanalysis.Issue, 0, len(errcheckIssues)) for _, i := range errcheckIssues { var text string if i.FuncName != "" { @@ -59,18 +59,18 @@ func NewErrcheck() *goanalysis.Linter { } else { text = "Error return value is not checked" } - issues = append(issues, result.Issue{ + issues = append(issues, goanalysis.NewIssue(&result.Issue{ //nolint:scopelint FromLinter: linterName, Text: text, Pos: i.Pos, - }) + }, pass)) } mu.Lock() res = append(res, issues...) 
mu.Unlock() return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return res }).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/funlen.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/funlen.go index d00485f17c0..3031da48350 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/funlen.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/funlen.go @@ -17,10 +17,10 @@ const funlenLinterName = "funlen" func NewFunlen() *goanalysis.Linter { var mu sync.Mutex - var resIssues []result.Issue + var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: funlenLinterName, Doc: goanalysis.TheOnlyanalyzerDoc, } return goanalysis.NewLinter( @@ -40,16 +40,16 @@ func NewFunlen() *goanalysis.Linter { return nil, nil } - res := make([]result.Issue, len(issues)) + res := make([]goanalysis.Issue, len(issues)) for k, i := range issues { - res[k] = result.Issue{ + res[k] = goanalysis.NewIssue(&result.Issue{ //nolint:scopelint Pos: token.Position{ Filename: i.Pos.Filename, Line: i.Pos.Line, }, Text: strings.TrimRight(i.Message, "\n"), FromLinter: funlenLinterName, - } + }, pass) } mu.Lock() @@ -58,7 +58,7 @@ func NewFunlen() *goanalysis.Linter { return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/issue.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/issue.go new file mode 100644 index 00000000000..b90a2912b9a --- /dev/null +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/issue.go @@ -0,0 +1,29 @@ +package goanalysis + +import ( + "go/token" + + 
"golang.org/x/tools/go/analysis" + + "github.com/golangci/golangci-lint/pkg/result" +) + +type Issue struct { + result.Issue + Pass *analysis.Pass +} + +func NewIssue(i *result.Issue, pass *analysis.Pass) Issue { + return Issue{ + Issue: *i, + Pass: pass, + } +} + +type EncodingIssue struct { + FromLinter string + Text string + Pos token.Position + LineRange *result.Range + Replacement *result.Replacement +} diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/linter.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/linter.go index ca35400ec81..bad7ab4ffbb 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/linter.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/linter.go @@ -4,7 +4,17 @@ import ( "context" "flag" "fmt" + "runtime" + "sort" "strings" + "sync" + "sync/atomic" + "time" + + "github.com/golangci/golangci-lint/pkg/timeutils" + + "github.com/golangci/golangci-lint/internal/pkgcache" + "github.com/golangci/golangci-lint/pkg/logutils" "golang.org/x/tools/go/packages" @@ -30,6 +40,8 @@ const ( LoadModeWholeProgram ) +var issuesCacheDebugf = logutils.Debug("goanalysis/issues/cache") + func (loadMode LoadMode) String() string { switch loadMode { case LoadModeNone: @@ -45,14 +57,14 @@ func (loadMode LoadMode) String() string { } type Linter struct { - name, desc string - analyzers []*analysis.Analyzer - cfg map[string]map[string]interface{} - issuesReporter func(*linter.Context) []result.Issue - contextSetter func(*linter.Context) - loadMode LoadMode - useOriginalPackages bool - isTypecheckMode bool + name, desc string + analyzers []*analysis.Analyzer + cfg map[string]map[string]interface{} + issuesReporter func(*linter.Context) []Issue + contextSetter func(*linter.Context) + loadMode LoadMode + needUseOriginalPackages bool + isTypecheckModeOn bool } func NewLinter(name, desc string, analyzers []*analysis.Analyzer, cfg 
map[string]map[string]interface{}) *Linter { @@ -60,11 +72,11 @@ func NewLinter(name, desc string, analyzers []*analysis.Analyzer, cfg map[string } func (lnt *Linter) UseOriginalPackages() { - lnt.useOriginalPackages = true + lnt.needUseOriginalPackages = true } func (lnt *Linter) SetTypecheckMode() { - lnt.isTypecheckMode = true + lnt.isTypecheckModeOn = true } func (lnt *Linter) LoadMode() LoadMode { @@ -76,7 +88,7 @@ func (lnt *Linter) WithLoadMode(loadMode LoadMode) *Linter { return lnt } -func (lnt *Linter) WithIssuesReporter(r func(*linter.Context) []result.Issue) *Linter { +func (lnt *Linter) WithIssuesReporter(r func(*linter.Context) []Issue) *Linter { lnt.issuesReporter = r return lnt } @@ -198,6 +210,7 @@ func buildIssuesFromErrorsForTypecheckMode(errs []error, lintCtx *linter.Context uniqReportedIssues[err.Msg] = true lintCtx.Log.Errorf("typechecking error: %s", err.Msg) } else { + i.Pkg = itErr.Pkg // to save to cache later issues = append(issues, *i) } } @@ -209,48 +222,270 @@ func buildIssues(diags []Diagnostic, linterNameBuilder func(diag *Diagnostic) st var issues []result.Issue for i := range diags { diag := &diags[i] + linterName := linterNameBuilder(diag) var text string - if diag.Analyzer.Name == TheOnlyAnalyzerName { + if diag.Analyzer.Name == linterName { text = diag.Message } else { text = fmt.Sprintf("%s: %s", diag.Analyzer.Name, diag.Message) } issues = append(issues, result.Issue{ - FromLinter: linterNameBuilder(diag), + FromLinter: linterName, Text: text, Pos: diag.Position, + Pkg: diag.Pkg, }) } return issues } -func (lnt *Linter) Run(ctx context.Context, lintCtx *linter.Context) ([]result.Issue, error) { +func (lnt *Linter) preRun(lintCtx *linter.Context) error { if err := analysis.Validate(lnt.analyzers); err != nil { - return nil, errors.Wrap(err, "failed to validate analyzers") + return errors.Wrap(err, "failed to validate analyzers") } if err := lnt.configure(); err != nil { - return nil, errors.Wrap(err, "failed to configure 
analyzers") + return errors.Wrap(err, "failed to configure analyzers") } if lnt.contextSetter != nil { lnt.contextSetter(lintCtx) } - loadMode := lnt.loadMode - runner := newRunner(lnt.name, lintCtx.Log.Child("goanalysis"), - lintCtx.PkgCache, lintCtx.LoadGuard, loadMode) + return nil +} + +func (lnt *Linter) getName() string { + return lnt.name +} + +func (lnt *Linter) getLinterNameForDiagnostic(*Diagnostic) string { + return lnt.name +} + +func (lnt *Linter) getAnalyzers() []*analysis.Analyzer { + return lnt.analyzers +} + +func (lnt *Linter) useOriginalPackages() bool { + return lnt.needUseOriginalPackages +} + +func (lnt *Linter) isTypecheckMode() bool { + return lnt.isTypecheckModeOn +} + +func (lnt *Linter) reportIssues(lintCtx *linter.Context) []Issue { + if lnt.issuesReporter != nil { + return lnt.issuesReporter(lintCtx) + } + return nil +} + +func (lnt *Linter) getLoadMode() LoadMode { + return lnt.loadMode +} + +type runAnalyzersConfig interface { + getName() string + getLinterNameForDiagnostic(*Diagnostic) string + getAnalyzers() []*analysis.Analyzer + useOriginalPackages() bool + isTypecheckMode() bool + reportIssues(*linter.Context) []Issue + getLoadMode() LoadMode +} + +func getIssuesCacheKey(analyzers []*analysis.Analyzer) string { + return "lint/result:" + analyzersHashID(analyzers) +} + +func saveIssuesToCache(allPkgs []*packages.Package, pkgsFromCache map[*packages.Package]bool, + issues []result.Issue, lintCtx *linter.Context, analyzers []*analysis.Analyzer) { + startedAt := time.Now() + perPkgIssues := map[*packages.Package][]result.Issue{} + for ind := range issues { + i := &issues[ind] + perPkgIssues[i.Pkg] = append(perPkgIssues[i.Pkg], *i) + } + + savedIssuesCount := int32(0) + lintResKey := getIssuesCacheKey(analyzers) + + workerCount := runtime.GOMAXPROCS(-1) + var wg sync.WaitGroup + wg.Add(workerCount) + + pkgCh := make(chan *packages.Package, len(allPkgs)) + for i := 0; i < workerCount; i++ { + go func() { + defer wg.Done() + for pkg := 
range pkgCh { + pkgIssues := perPkgIssues[pkg] + encodedIssues := make([]EncodingIssue, 0, len(pkgIssues)) + for ind := range pkgIssues { + i := &pkgIssues[ind] + encodedIssues = append(encodedIssues, EncodingIssue{ + FromLinter: i.FromLinter, + Text: i.Text, + Pos: i.Pos, + LineRange: i.LineRange, + Replacement: i.Replacement, + }) + } + + atomic.AddInt32(&savedIssuesCount, int32(len(encodedIssues))) + if err := lintCtx.PkgCache.Put(pkg, pkgcache.HashModeNeedAllDeps, lintResKey, encodedIssues); err != nil { + lintCtx.Log.Infof("Failed to save package %s issues (%d) to cache: %s", pkg, len(pkgIssues), err) + } else { + issuesCacheDebugf("Saved package %s issues (%d) to cache", pkg, len(pkgIssues)) + } + } + }() + } + + for _, pkg := range allPkgs { + if pkgsFromCache[pkg] { + continue + } + + pkgCh <- pkg + } + close(pkgCh) + wg.Wait() + + issuesCacheDebugf("Saved %d issues from %d packages to cache in %s", savedIssuesCount, len(allPkgs), time.Since(startedAt)) +} + +//nolint:gocritic +func loadIssuesFromCache(pkgs []*packages.Package, lintCtx *linter.Context, + analyzers []*analysis.Analyzer) ([]result.Issue, map[*packages.Package]bool) { + startedAt := time.Now() + + lintResKey := getIssuesCacheKey(analyzers) + type cacheRes struct { + issues []result.Issue + loadErr error + } + pkgToCacheRes := make(map[*packages.Package]*cacheRes, len(pkgs)) + for _, pkg := range pkgs { + pkgToCacheRes[pkg] = &cacheRes{} + } + + workerCount := runtime.GOMAXPROCS(-1) + var wg sync.WaitGroup + wg.Add(workerCount) + + pkgCh := make(chan *packages.Package, len(pkgs)) + for i := 0; i < workerCount; i++ { + go func() { + defer wg.Done() + for pkg := range pkgCh { + var pkgIssues []EncodingIssue + err := lintCtx.PkgCache.Get(pkg, pkgcache.HashModeNeedAllDeps, lintResKey, &pkgIssues) + cacheRes := pkgToCacheRes[pkg] + cacheRes.loadErr = err + if err != nil { + continue + } + if len(pkgIssues) == 0 { + continue + } + + issues := make([]result.Issue, 0, len(pkgIssues)) + for _, i := 
range pkgIssues { + issues = append(issues, result.Issue{ + FromLinter: i.FromLinter, + Text: i.Text, + Pos: i.Pos, + LineRange: i.LineRange, + Replacement: i.Replacement, + Pkg: pkg, + }) + } + cacheRes.issues = issues + } + }() + } + + for _, pkg := range pkgs { + pkgCh <- pkg + } + close(pkgCh) + wg.Wait() + + loadedIssuesCount := 0 + var issues []result.Issue + pkgsFromCache := map[*packages.Package]bool{} + for pkg, cacheRes := range pkgToCacheRes { + if cacheRes.loadErr == nil { + loadedIssuesCount += len(cacheRes.issues) + pkgsFromCache[pkg] = true + issues = append(issues, cacheRes.issues...) + issuesCacheDebugf("Loaded package %s issues (%d) from cache", pkg, len(cacheRes.issues)) + } else { + issuesCacheDebugf("Didn't load package %s issues from cache: %s", pkg, cacheRes.loadErr) + } + } + issuesCacheDebugf("Loaded %d issues from cache in %s, analyzing %d/%d packages", + loadedIssuesCount, time.Since(startedAt), len(pkgs)-len(pkgsFromCache), len(pkgs)) + return issues, pkgsFromCache +} + +func runAnalyzers(cfg runAnalyzersConfig, lintCtx *linter.Context) ([]result.Issue, error) { + log := lintCtx.Log.Child("goanalysis") + sw := timeutils.NewStopwatch("analyzers", log) + defer sw.PrintTopStages(10) + + runner := newRunner(cfg.getName(), log, lintCtx.PkgCache, lintCtx.LoadGuard, cfg.getLoadMode(), sw) pkgs := lintCtx.Packages - if lnt.useOriginalPackages { + if cfg.useOriginalPackages() { pkgs = lintCtx.OriginalPackages } - diags, errs := runner.run(lnt.analyzers, pkgs) + issues, pkgsFromCache := loadIssuesFromCache(pkgs, lintCtx, cfg.getAnalyzers()) + var pkgsToAnalyze []*packages.Package + for _, pkg := range pkgs { + if !pkgsFromCache[pkg] { + pkgsToAnalyze = append(pkgsToAnalyze, pkg) + } + } - linterNameBuilder := func(*Diagnostic) string { return lnt.Name() } - if lnt.isTypecheckMode { - return buildIssuesFromErrorsForTypecheckMode(errs, lintCtx) + diags, errs, passToPkg := runner.run(cfg.getAnalyzers(), pkgsToAnalyze) + + defer func() { + if 
len(errs) == 0 { + // If we try to save to cache even if we have compilation errors + // we won't see them on repeated runs. + saveIssuesToCache(pkgs, pkgsFromCache, issues, lintCtx, cfg.getAnalyzers()) + } + }() + + buildAllIssues := func() []result.Issue { + var retIssues []result.Issue + reportedIssues := cfg.reportIssues(lintCtx) + for i := range reportedIssues { + issue := &reportedIssues[i].Issue + if issue.Pkg == nil { + issue.Pkg = passToPkg[reportedIssues[i].Pass] + } + retIssues = append(retIssues, *issue) + } + retIssues = append(retIssues, buildIssues(diags, cfg.getLinterNameForDiagnostic)...) + return retIssues + } + + if cfg.isTypecheckMode() { + errIssues, err := buildIssuesFromErrorsForTypecheckMode(errs, lintCtx) + if err != nil { + return nil, err + } + + issues = append(issues, errIssues...) + issues = append(issues, buildAllIssues()...) + + return issues, nil } // Don't print all errs: they can duplicate. @@ -258,12 +493,24 @@ func (lnt *Linter) Run(ctx context.Context, lintCtx *linter.Context) ([]result.I return nil, errs[0] } - var issues []result.Issue - if lnt.issuesReporter != nil { - issues = append(issues, lnt.issuesReporter(lintCtx)...) - } else { - issues = buildIssues(diags, linterNameBuilder) + issues = append(issues, buildAllIssues()...) 
+ return issues, nil +} + +func (lnt *Linter) Run(ctx context.Context, lintCtx *linter.Context) ([]result.Issue, error) { + if err := lnt.preRun(lintCtx); err != nil { + return nil, err } - return issues, nil + return runAnalyzers(lnt, lintCtx) +} + +func analyzersHashID(analyzers []*analysis.Analyzer) string { + names := make([]string, 0, len(analyzers)) + for _, a := range analyzers { + names = append(names, a.Name) + } + + sort.Strings(names) + return strings.Join(names, ",") } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/metalinter.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/metalinter.go index 96ea36a4b20..5975e2057a1 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/metalinter.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/metalinter.go @@ -11,11 +11,14 @@ import ( ) type MetaLinter struct { - linters []*Linter + linters []*Linter + analyzerToLinterName map[*analysis.Analyzer]string } func NewMetaLinter(linters []*Linter) *MetaLinter { - return &MetaLinter{linters: linters} + ml := &MetaLinter{linters: linters} + ml.analyzerToLinterName = ml.getAnalyzerToLinterNameMapping() + return ml } func (ml MetaLinter) Name() string { @@ -28,7 +31,7 @@ func (ml MetaLinter) Desc() string { func (ml MetaLinter) isTypecheckMode() bool { for _, linter := range ml.linters { - if linter.isTypecheckMode { + if linter.isTypecheckMode() { return true } } @@ -45,15 +48,7 @@ func (ml MetaLinter) getLoadMode() LoadMode { return loadMode } -func (ml MetaLinter) runContextSetters(lintCtx *linter.Context) { - for _, linter := range ml.linters { - if linter.contextSetter != nil { - linter.contextSetter(lintCtx) - } - } -} - -func (ml MetaLinter) getAllAnalyzers() []*analysis.Analyzer { +func (ml MetaLinter) getAnalyzers() []*analysis.Analyzer { var allAnalyzers []*analysis.Analyzer for _, linter := range ml.linters { allAnalyzers = append(allAnalyzers, 
linter.analyzers...) @@ -61,75 +56,44 @@ func (ml MetaLinter) getAllAnalyzers() []*analysis.Analyzer { return allAnalyzers } -func (ml MetaLinter) getAnalyzerToLinterNameMapping() map[*analysis.Analyzer]string { - analyzerToLinterName := map[*analysis.Analyzer]string{} - for _, linter := range ml.linters { - for _, a := range linter.analyzers { - analyzerToLinterName[a] = linter.Name() - } - } - return analyzerToLinterName +func (ml MetaLinter) getName() string { + return "metalinter" } -func (ml MetaLinter) configure() error { - for _, linter := range ml.linters { - if err := linter.configure(); err != nil { - return errors.Wrapf(err, "failed to configure analyzers of %s", linter.Name()) - } - } - return nil +func (ml MetaLinter) useOriginalPackages() bool { + return false // `unused` can't be run by this metalinter } -func (ml MetaLinter) validate() error { - for _, linter := range ml.linters { - if err := analysis.Validate(linter.analyzers); err != nil { - return errors.Wrapf(err, "failed to validate analyzers of %s", linter.Name()) +func (ml MetaLinter) reportIssues(lintCtx *linter.Context) []Issue { + var ret []Issue + for _, lnt := range ml.linters { + if lnt.issuesReporter != nil { + ret = append(ret, lnt.issuesReporter(lintCtx)...) 
} } - return nil + return ret } -func (ml MetaLinter) Run(ctx context.Context, lintCtx *linter.Context) ([]result.Issue, error) { - if err := ml.validate(); err != nil { - return nil, err - } - - if err := ml.configure(); err != nil { - return nil, err - } - ml.runContextSetters(lintCtx) - - analyzerToLinterName := ml.getAnalyzerToLinterNameMapping() - - runner := newRunner("metalinter", lintCtx.Log.Child("goanalysis"), - lintCtx.PkgCache, lintCtx.LoadGuard, ml.getLoadMode()) - - diags, errs := runner.run(ml.getAllAnalyzers(), lintCtx.Packages) - - buildAllIssues := func() []result.Issue { - linterNameBuilder := func(diag *Diagnostic) string { return analyzerToLinterName[diag.Analyzer] } - issues := buildIssues(diags, linterNameBuilder) +func (ml MetaLinter) getLinterNameForDiagnostic(diag *Diagnostic) string { + return ml.analyzerToLinterName[diag.Analyzer] +} - for _, linter := range ml.linters { - if linter.issuesReporter != nil { - issues = append(issues, linter.issuesReporter(lintCtx)...) - } +func (ml MetaLinter) getAnalyzerToLinterNameMapping() map[*analysis.Analyzer]string { + analyzerToLinterName := map[*analysis.Analyzer]string{} + for _, linter := range ml.linters { + for _, a := range linter.analyzers { + analyzerToLinterName[a] = linter.Name() } - return issues } + return analyzerToLinterName +} - if ml.isTypecheckMode() { - issues, err := buildIssuesFromErrorsForTypecheckMode(errs, lintCtx) - if err != nil { - return nil, err +func (ml MetaLinter) Run(ctx context.Context, lintCtx *linter.Context) ([]result.Issue, error) { + for _, linter := range ml.linters { + if err := linter.preRun(lintCtx); err != nil { + return nil, errors.Wrapf(err, "failed to pre-run %s", linter.Name()) } - return append(issues, buildAllIssues()...), nil - } - - // Don't print all errs: they can duplicate. 
- if len(errs) != 0 { - return nil, errs[0] } - return buildAllIssues(), nil + return runAnalyzers(ml, lintCtx) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runner.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runner.go index e194a811c2d..5d8470dbd04 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runner.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goanalysis/runner.go @@ -28,6 +28,8 @@ import ( "sync/atomic" "time" + "github.com/golangci/golangci-lint/pkg/timeutils" + "github.com/pkg/errors" "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/gcexportdata" @@ -69,23 +71,30 @@ type Diagnostic struct { analysis.Diagnostic Analyzer *analysis.Analyzer Position token.Position + Pkg *packages.Package } type runner struct { - log logutils.Log - prefix string // ensure unique analyzer names - pkgCache *pkgcache.Cache - loadGuard *load.Guard - loadMode LoadMode + log logutils.Log + prefix string // ensure unique analyzer names + pkgCache *pkgcache.Cache + loadGuard *load.Guard + loadMode LoadMode + passToPkg map[*analysis.Pass]*packages.Package + passToPkgGuard sync.Mutex + sw *timeutils.Stopwatch } -func newRunner(prefix string, logger logutils.Log, pkgCache *pkgcache.Cache, loadGuard *load.Guard, loadMode LoadMode) *runner { +func newRunner(prefix string, logger logutils.Log, pkgCache *pkgcache.Cache, loadGuard *load.Guard, + loadMode LoadMode, sw *timeutils.Stopwatch) *runner { return &runner{ prefix: prefix, log: logger, pkgCache: pkgCache, loadGuard: loadGuard, loadMode: loadMode, + passToPkg: map[*analysis.Pass]*packages.Package{}, + sw: sw, } } @@ -95,12 +104,14 @@ func newRunner(prefix string, logger logutils.Log, pkgCache *pkgcache.Cache, loa // It provides most of the logic for the main functions of both the // singlechecker and the multi-analysis commands. // It returns the appropriate exit code. 
-func (r *runner) run(analyzers []*analysis.Analyzer, initialPackages []*packages.Package) ([]Diagnostic, []error) { +func (r *runner) run(analyzers []*analysis.Analyzer, initialPackages []*packages.Package) ([]Diagnostic, + []error, map[*analysis.Pass]*packages.Package) { debugf("Analyzing %d packages on load mode %s", len(initialPackages), r.loadMode) defer r.pkgCache.Trim() roots := r.analyze(initialPackages, analyzers) - return extractDiagnostics(roots) + diags, errs := extractDiagnostics(roots) + return diags, errs, r.passToPkg } type actKey struct { @@ -138,9 +149,7 @@ func (r *runner) makeAction(a *analysis.Analyzer, pkg *packages.Package, act = actAlloc.alloc() act.a = a act.pkg = pkg - act.log = r.log - act.prefix = r.prefix - act.pkgCache = r.pkgCache + act.r = r act.isInitialPkg = initialPkgs[pkg] act.needAnalyzeSource = initialPkgs[pkg] act.analysisDoneCh = make(chan struct{}) @@ -362,7 +371,13 @@ func extractDiagnostics(roots []*action) (retDiags []Diagnostic, retErrors []err } seen[k] = true - retDiags = append(retDiags, Diagnostic{Diagnostic: diag, Analyzer: act.a, Position: posn}) + retDiag := Diagnostic{ + Diagnostic: diag, + Analyzer: act.a, + Position: posn, + Pkg: act.pkg, + } + retDiags = append(retDiags, retDiag) } } } @@ -404,9 +419,7 @@ type action struct { result interface{} diagnostics []analysis.Diagnostic err error - log logutils.Log - prefix string - pkgCache *pkgcache.Cache + r *runner analysisDoneCh chan struct{} loadCachedFactsDone bool loadCachedFactsOk bool @@ -473,7 +486,9 @@ func (act *action) analyzeSafe() { act.a.Name, act.pkg.Name, act.isInitialPkg, act.needAnalyzeSource, p), debug.Stack()) } }() - act.analyze() + act.r.sw.TrackStage(act.a.Name, func() { + act.analyze() + }) } func (act *action) analyze() { @@ -483,22 +498,8 @@ func (act *action) analyze() { return } - // TODO(adonovan): uncomment this during profiling. - // It won't build pre-go1.11 but conditional compilation - // using build tags isn't warranted. 
- // - // ctx, task := trace.NewTask(context.Background(), "exec") - // trace.Log(ctx, "pass", act.String()) - // defer task.End() - - // Record time spent in this node but not its dependencies. - // In parallel mode, due to GC/scheduler contention, the - // time is 5x higher than in sequential mode, even with a - // semaphore limiting the number of threads here. - // So use -debug=tp. - defer func(now time.Time) { - analyzeDebugf("go/analysis: %s: %s: analyzed package %q in %s", act.prefix, act.a.Name, act.pkg.Name, time.Since(now)) + analyzeDebugf("go/analysis: %s: %s: analyzed package %q in %s", act.r.prefix, act.a.Name, act.pkg.Name, time.Since(now)) }(time.Now()) // Report an error if any dependency failed. @@ -552,6 +553,9 @@ func (act *action) analyze() { AllPackageFacts: act.allPackageFacts, } act.pass = pass + act.r.passToPkgGuard.Lock() + act.r.passToPkg[pass] = act.pkg + act.r.passToPkgGuard.Unlock() var err error if act.pkg.IllTyped { @@ -574,7 +578,7 @@ func (act *action) analyze() { pass.ExportPackageFact = nil if err := act.persistFactsToCache(); err != nil { - act.log.Warnf("Failed to persist facts to cache: %s", err) + act.r.log.Warnf("Failed to persist facts to cache: %s", err) } } @@ -598,7 +602,7 @@ func inheritFacts(act, dep *action) { var err error fact, err = codeFact(fact) if err != nil { - act.log.Panicf("internal error: encoding of %T fact failed in %v", fact, act) + act.r.log.Panicf("internal error: encoding of %T fact failed in %v", fact, act) } } @@ -618,7 +622,7 @@ func inheritFacts(act, dep *action) { var err error fact, err = codeFact(fact) if err != nil { - act.log.Panicf("internal error: encoding of %T fact failed in %v", fact, act) + act.r.log.Panicf("internal error: encoding of %T fact failed in %v", fact, act) } } @@ -695,7 +699,7 @@ func (act *action) importObjectFact(obj types.Object, ptr analysis.Fact) bool { // exportObjectFact implements Pass.ExportObjectFact. 
func (act *action) exportObjectFact(obj types.Object, fact analysis.Fact) { if obj.Pkg() != act.pkg.Types { - act.log.Panicf("internal error: in analysis %s of package %s: Fact.Set(%s, %T): can't set facts on objects belonging another package", + act.r.log.Panicf("internal error: in analysis %s of package %s: Fact.Set(%s, %T): can't set facts on objects belonging another package", act.a, act.pkg, obj, fact) } @@ -756,7 +760,7 @@ func (act *action) allPackageFacts() []analysis.PackageFact { func (act *action) factType(fact analysis.Fact) reflect.Type { t := reflect.TypeOf(fact) if t.Kind() != reflect.Ptr { - act.log.Fatalf("invalid Fact type: got %T, want pointer", t) + act.r.log.Fatalf("invalid Fact type: got %T, want pointer", t) } return t } @@ -806,15 +810,15 @@ func (act *action) persistFactsToCache() error { factsCacheDebugf("Caching %d facts for package %q and analyzer %s", len(facts), act.pkg.Name, act.a.Name) key := fmt.Sprintf("%s/facts", analyzer.Name) - return act.pkgCache.Put(act.pkg, key, facts) + return act.r.pkgCache.Put(act.pkg, pkgcache.HashModeNeedAllDeps, key, facts) } func (act *action) loadPersistedFacts() bool { var facts []Fact key := fmt.Sprintf("%s/facts", act.a.Name) - if err := act.pkgCache.Get(act.pkg, key, &facts); err != nil { + if err := act.r.pkgCache.Get(act.pkg, pkgcache.HashModeNeedAllDeps, key, &facts); err != nil { if err != pkgcache.ErrMissing { - act.log.Warnf("Failed to get persisted facts: %s", err) + act.r.log.Warnf("Failed to get persisted facts: %s", err) } factsCacheDebugf("No cached facts for package %q and analyzer %s", act.pkg.Name, act.a.Name) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoglobals.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoglobals.go index 244ebfc3acd..f2166416be3 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoglobals.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoglobals.go @@ 
-19,15 +19,18 @@ const gochecknoglobalsName = "gochecknoglobals" //nolint:dupl func NewGochecknoglobals() *goanalysis.Linter { var mu sync.Mutex - var resIssues []result.Issue + var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: gochecknoglobalsName, Doc: goanalysis.TheOnlyanalyzerDoc, Run: func(pass *analysis.Pass) (interface{}, error) { - var res []result.Issue + var res []goanalysis.Issue for _, file := range pass.Files { - res = append(res, checkFileForGlobals(file, pass.Fset)...) + fileIssues := checkFileForGlobals(file, pass.Fset) + for i := range fileIssues { + res = append(res, goanalysis.NewIssue(&fileIssues[i], pass)) + } } if len(res) == 0 { return nil, nil @@ -45,7 +48,7 @@ func NewGochecknoglobals() *goanalysis.Linter { "Checks that no globals are present in Go code", []*analysis.Analyzer{analyzer}, nil, - ).WithIssuesReporter(func(*linter.Context) []result.Issue { + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoinits.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoinits.go index 7d9140807ba..18465b13068 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoinits.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gochecknoinits.go @@ -18,15 +18,18 @@ const gochecknoinitsName = "gochecknoinits" //nolint:dupl func NewGochecknoinits() *goanalysis.Linter { var mu sync.Mutex - var resIssues []result.Issue + var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: gochecknoinitsName, Doc: goanalysis.TheOnlyanalyzerDoc, Run: func(pass *analysis.Pass) (interface{}, error) { - var res []result.Issue + var res []goanalysis.Issue for _, file := range pass.Files { - res = append(res, checkFileForInits(file, pass.Fset)...) 
+ fileIssues := checkFileForInits(file, pass.Fset) + for i := range fileIssues { + res = append(res, goanalysis.NewIssue(&fileIssues[i], pass)) + } } if len(res) == 0 { return nil, nil @@ -44,7 +47,7 @@ func NewGochecknoinits() *goanalysis.Linter { "Checks that no init functions are present in Go code", []*analysis.Analyzer{analyzer}, nil, - ).WithIssuesReporter(func(*linter.Context) []result.Issue { + ).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocognit.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocognit.go index 8a3b65cd28d..71301180874 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocognit.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocognit.go @@ -18,7 +18,7 @@ const gocognitName = "gocognit" func NewGocognit() *goanalysis.Linter { var mu sync.Mutex - var resIssues []result.Issue + var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ Name: goanalysis.TheOnlyAnalyzerName, @@ -43,18 +43,18 @@ func NewGocognit() *goanalysis.Linter { return stats[i].Complexity > stats[j].Complexity }) - res := make([]result.Issue, 0, len(stats)) + res := make([]goanalysis.Issue, 0, len(stats)) for _, s := range stats { if s.Complexity <= lintCtx.Settings().Gocognit.MinComplexity { break // Break as the stats is already sorted from greatest to least } - res = append(res, result.Issue{ + res = append(res, goanalysis.NewIssue(&result.Issue{ //nolint:scopelint Pos: s.Pos, Text: fmt.Sprintf("cognitive complexity %d of func %s is high (> %d)", s.Complexity, formatCode(s.FuncName, lintCtx.Cfg), lintCtx.Settings().Gocognit.MinComplexity), FromLinter: gocognitName, - }) + }, pass)) } mu.Lock() @@ -63,7 +63,7 @@ func NewGocognit() *goanalysis.Linter { return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + 
}).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goconst.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goconst.go index 00787c8cbcb..c77dc64e839 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goconst.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goconst.go @@ -16,10 +16,10 @@ const goconstName = "goconst" func NewGoconst() *goanalysis.Linter { var mu sync.Mutex - var resIssues []result.Issue + var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: goconstName, Doc: goanalysis.TheOnlyanalyzerDoc, } return goanalysis.NewLinter( @@ -40,12 +40,12 @@ func NewGoconst() *goanalysis.Linter { return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } -func checkConstants(pass *analysis.Pass, lintCtx *linter.Context) ([]result.Issue, error) { +func checkConstants(pass *analysis.Pass, lintCtx *linter.Context) ([]goanalysis.Issue, error) { cfg := goconstAPI.Config{ MatchWithConstants: true, MinStringLength: lintCtx.Settings().Goconst.MinStringLen, @@ -61,7 +61,7 @@ func checkConstants(pass *analysis.Pass, lintCtx *linter.Context) ([]result.Issu return nil, nil } - res := make([]result.Issue, 0, len(goconstIssues)) + res := make([]goanalysis.Issue, 0, len(goconstIssues)) for _, i := range goconstIssues { textBegin := fmt.Sprintf("string %s has %d occurrences", formatCode(i.Str, lintCtx.Cfg), i.OccurencesCount) var textEnd string @@ -70,11 +70,11 @@ func checkConstants(pass *analysis.Pass, lintCtx *linter.Context) ([]result.Issu } else { textEnd = fmt.Sprintf(", but such constant %s already exists", formatCode(i.MatchingConst, lintCtx.Cfg)) } - res = append(res, 
result.Issue{ + res = append(res, goanalysis.NewIssue(&result.Issue{ //nolint:scopelint Pos: i.Pos, Text: textBegin + textEnd, FromLinter: goconstName, - }) + }, pass)) } return res, nil diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocritic.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocritic.go index d040c44dca1..fb29252096b 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocritic.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocritic.go @@ -23,12 +23,12 @@ const gocriticName = "gocritic" func NewGocritic() *goanalysis.Linter { var mu sync.Mutex - var resIssues []result.Issue + var resIssues []goanalysis.Issue sizes := types.SizesFor("gc", runtime.GOARCH) analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: gocriticName, Doc: goanalysis.TheOnlyanalyzerDoc, } return goanalysis.NewLinter( @@ -45,7 +45,11 @@ func NewGocritic() *goanalysis.Linter { } lintpackCtx.SetPackageInfo(pass.TypesInfo, pass.Pkg) - res := runGocriticOnPackage(lintpackCtx, enabledCheckers, pass.Files) + var res []goanalysis.Issue + pkgIssues := runGocriticOnPackage(lintpackCtx, enabledCheckers, pass.Files) + for i := range pkgIssues { + res = append(res, goanalysis.NewIssue(&pkgIssues[i], pass)) + } if len(res) == 0 { return nil, nil } @@ -56,7 +60,7 @@ func NewGocritic() *goanalysis.Linter { return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocyclo.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocyclo.go index 8e11775b2f8..4fb288989be 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocyclo.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gocyclo.go @@ -18,10 +18,10 @@ const gocycloName = 
"gocyclo" func NewGocyclo() *goanalysis.Linter { var mu sync.Mutex - var resIssues []result.Issue + var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: gocycloName, Doc: goanalysis.TheOnlyanalyzerDoc, } return goanalysis.NewLinter( @@ -43,18 +43,18 @@ func NewGocyclo() *goanalysis.Linter { return stats[i].Complexity > stats[j].Complexity }) - res := make([]result.Issue, 0, len(stats)) + res := make([]goanalysis.Issue, 0, len(stats)) for _, s := range stats { if s.Complexity <= lintCtx.Settings().Gocyclo.MinComplexity { break // Break as the stats is already sorted from greatest to least } - res = append(res, result.Issue{ + res = append(res, goanalysis.NewIssue(&result.Issue{ //nolint:scopelint Pos: s.Pos, Text: fmt.Sprintf("cyclomatic complexity %d of func %s is high (> %d)", s.Complexity, formatCode(s.FuncName, lintCtx.Cfg), lintCtx.Settings().Gocyclo.MinComplexity), FromLinter: gocycloName, - }) + }, pass)) } mu.Lock() @@ -63,7 +63,7 @@ func NewGocyclo() *goanalysis.Linter { return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/godox.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/godox.go index 0bde9ff2864..78d13f3bd4c 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/godox.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/godox.go @@ -17,10 +17,10 @@ const godoxName = "godox" func NewGodox() *goanalysis.Linter { var mu sync.Mutex - var resIssues []result.Issue + var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: godoxName, Doc: goanalysis.TheOnlyanalyzerDoc, } return goanalysis.NewLinter( @@ -39,16 +39,16 @@ func NewGodox() *goanalysis.Linter { return nil, nil } - 
res := make([]result.Issue, len(issues)) + res := make([]goanalysis.Issue, len(issues)) for k, i := range issues { - res[k] = result.Issue{ + res[k] = goanalysis.NewIssue(&result.Issue{ //nolint:scopelint Pos: token.Position{ Filename: i.Pos.Filename, Line: i.Pos.Line, }, Text: strings.TrimRight(i.Message, "\n"), FromLinter: godoxName, - } + }, pass) } mu.Lock() @@ -57,7 +57,7 @@ func NewGodox() *goanalysis.Linter { return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt.go index e3e3c44f47f..e71f27bb171 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gofmt.go @@ -9,17 +9,16 @@ import ( "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" ) const gofmtName = "gofmt" func NewGofmt() *goanalysis.Linter { var mu sync.Mutex - var resIssues []result.Issue + var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: gofmtName, Doc: goanalysis.TheOnlyanalyzerDoc, } return goanalysis.NewLinter( @@ -36,7 +35,7 @@ func NewGofmt() *goanalysis.Linter { fileNames = append(fileNames, pos.Filename) } - var issues []result.Issue + var issues []goanalysis.Issue for _, f := range fileNames { diff, err := gofmtAPI.Run(f, lintCtx.Settings().Gofmt.Simplify) @@ -52,7 +51,9 @@ func NewGofmt() *goanalysis.Linter { return nil, errors.Wrapf(err, "can't extract issues from gofmt diff output %q", string(diff)) } - issues = append(issues, is...) 
+ for i := range is { + issues = append(issues, goanalysis.NewIssue(&is[i], pass)) + } } if len(issues) == 0 { @@ -65,7 +66,7 @@ func NewGofmt() *goanalysis.Linter { return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goimports.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goimports.go index 262b666d7df..90e19b0e85d 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/goimports.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/goimports.go @@ -10,17 +10,16 @@ import ( "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" "github.com/golangci/golangci-lint/pkg/lint/linter" - "github.com/golangci/golangci-lint/pkg/result" ) const goimportsName = "goimports" func NewGoimports() *goanalysis.Linter { var mu sync.Mutex - var resIssues []result.Issue + var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: goimportsName, Doc: goanalysis.TheOnlyanalyzerDoc, } return goanalysis.NewLinter( @@ -37,7 +36,7 @@ func NewGoimports() *goanalysis.Linter { fileNames = append(fileNames, pos.Filename) } - var issues []result.Issue + var issues []goanalysis.Issue for _, f := range fileNames { diff, err := goimportsAPI.Run(f) @@ -53,7 +52,9 @@ func NewGoimports() *goanalysis.Linter { return nil, errors.Wrapf(err, "can't extract issues from gofmt diff output %q", string(diff)) } - issues = append(issues, is...) 
+ for i := range is { + issues = append(issues, goanalysis.NewIssue(&is[i], pass)) + } } if len(issues) == 0 { @@ -66,7 +67,7 @@ func NewGoimports() *goanalysis.Linter { return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/golint.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/golint.go index a77c6f1ba4d..3b1b1b66f1f 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/golint.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/golint.go @@ -4,6 +4,7 @@ import ( "fmt" "go/ast" "go/token" + "go/types" "sync" lintAPI "github.com/golangci/lint-1" @@ -14,9 +15,10 @@ import ( "github.com/golangci/golangci-lint/pkg/result" ) -func golintProcessPkg(minConfidence float64, files []*ast.File, fset *token.FileSet) ([]result.Issue, error) { +func golintProcessPkg(minConfidence float64, files []*ast.File, fset *token.FileSet, + typesPkg *types.Package, typesInfo *types.Info) ([]result.Issue, error) { l := new(lintAPI.Linter) - ps, err := l.LintASTFiles(files, fset) + ps, err := l.LintPkg(files, fset, typesPkg, typesInfo) if err != nil { return nil, fmt.Errorf("can't lint %d files: %s", len(files), err) } @@ -44,10 +46,10 @@ const golintName = "golint" func NewGolint() *goanalysis.Linter { var mu sync.Mutex - var resIssues []result.Issue + var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: golintName, Doc: goanalysis.TheOnlyanalyzerDoc, } return goanalysis.NewLinter( @@ -57,18 +59,20 @@ func NewGolint() *goanalysis.Linter { nil, ).WithContextSetter(func(lintCtx *linter.Context) { analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - res, err := golintProcessPkg(lintCtx.Settings().Golint.MinConfidence, pass.Files, pass.Fset) + res, 
err := golintProcessPkg(lintCtx.Settings().Golint.MinConfidence, pass.Files, pass.Fset, pass.Pkg, pass.TypesInfo) if err != nil || len(res) == 0 { return nil, err } mu.Lock() - resIssues = append(resIssues, res...) + for i := range res { + resIssues = append(resIssues, goanalysis.NewIssue(&res[i], pass)) + } mu.Unlock() return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues - }).WithLoadMode(goanalysis.LoadModeSyntax) + }).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosec.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosec.go index e3551908cf7..acc0bee7891 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosec.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/gosec.go @@ -22,14 +22,14 @@ const gosecName = "gosec" func NewGosec() *goanalysis.Linter { var mu sync.Mutex - var resIssues []result.Issue + var resIssues []goanalysis.Issue gasConfig := gosec.NewConfig() enabledRules := rules.Generate() logger := log.New(ioutil.Discard, "", 0) analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: gosecName, Doc: goanalysis.TheOnlyanalyzerDoc, } return goanalysis.NewLinter( @@ -53,7 +53,7 @@ func NewGosec() *goanalysis.Linter { return nil, nil } - res := make([]result.Issue, 0, len(issues)) + res := make([]goanalysis.Issue, 0, len(issues)) for _, i := range issues { text := fmt.Sprintf("%s: %s", i.RuleID, i.What) // TODO: use severity and confidence var r *result.Range @@ -67,7 +67,7 @@ func NewGosec() *goanalysis.Linter { line = r.From } - res = append(res, result.Issue{ + res = append(res, goanalysis.NewIssue(&result.Issue{ //nolint:scopelint Pos: token.Position{ Filename: i.File, Line: line, @@ -75,7 +75,7 @@ func NewGosec() *goanalysis.Linter { Text: text, LineRange: r, FromLinter: gosecName, - }) + }, pass)) } 
mu.Lock() @@ -84,7 +84,7 @@ func NewGosec() *goanalysis.Linter { return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/ineffassign.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/ineffassign.go index fbc179a7699..7d755809d79 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/ineffassign.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/ineffassign.go @@ -16,10 +16,10 @@ const ineffassignName = "ineffassign" func NewIneffassign() *goanalysis.Linter { var mu sync.Mutex - var resIssues []result.Issue + var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: ineffassignName, Doc: goanalysis.TheOnlyanalyzerDoc, } return goanalysis.NewLinter( @@ -40,13 +40,13 @@ func NewIneffassign() *goanalysis.Linter { return nil, nil } - res := make([]result.Issue, 0, len(issues)) + res := make([]goanalysis.Issue, 0, len(issues)) for _, i := range issues { - res = append(res, result.Issue{ + res = append(res, goanalysis.NewIssue(&result.Issue{ //nolint:scopelint Pos: i.Pos, Text: fmt.Sprintf("ineffectual assignment to %s", formatCode(i.IdentName, lintCtx.Cfg)), FromLinter: ineffassignName, - }) + }, pass)) } mu.Lock() @@ -55,7 +55,7 @@ func NewIneffassign() *goanalysis.Linter { return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/interfacer.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/interfacer.go index 485d35ee48d..96f6b5b9d46 100644 --- 
a/vendor/github.com/golangci/golangci-lint/pkg/golinters/interfacer.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/interfacer.go @@ -16,10 +16,10 @@ const interfacerName = "interfacer" func NewInterfacer() *goanalysis.Linter { var mu sync.Mutex - var resIssues []result.Issue + var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: interfacerName, Doc: goanalysis.TheOnlyanalyzerDoc, Requires: []*analysis.Analyzer{buildssa.Analyzer}, } @@ -45,14 +45,14 @@ func NewInterfacer() *goanalysis.Linter { return nil, nil } - res := make([]result.Issue, 0, len(issues)) + res := make([]goanalysis.Issue, 0, len(issues)) for _, i := range issues { pos := pass.Fset.Position(i.Pos()) - res = append(res, result.Issue{ + res = append(res, goanalysis.NewIssue(&result.Issue{ //nolint:scopelint Pos: pos, Text: i.Message(), FromLinter: interfacerName, - }) + }, pass)) } mu.Lock() @@ -61,7 +61,7 @@ func NewInterfacer() *goanalysis.Linter { return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/lll.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/lll.go index 182de66bcbf..c24b4d14814 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/lll.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/lll.go @@ -77,10 +77,10 @@ const lllName = "lll" func NewLLL() *goanalysis.Linter { var mu sync.Mutex - var resIssues []result.Issue + var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: lllName, Doc: goanalysis.TheOnlyanalyzerDoc, } return goanalysis.NewLinter( @@ -96,14 +96,16 @@ func NewLLL() *goanalysis.Linter { fileNames = append(fileNames, pos.Filename) } - var res []result.Issue + var 
res []goanalysis.Issue spaces := strings.Repeat(" ", lintCtx.Settings().Lll.TabWidth) for _, f := range fileNames { issues, err := getLLLIssuesForFile(f, lintCtx.Settings().Lll.LineLength, spaces) if err != nil { return nil, err } - res = append(res, issues...) + for i := range issues { + res = append(res, goanalysis.NewIssue(&issues[i], pass)) + } } if len(res) == 0 { @@ -116,7 +118,7 @@ func NewLLL() *goanalysis.Linter { return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/maligned.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/maligned.go index 775ffec8964..4f34f0ea18b 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/maligned.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/maligned.go @@ -15,9 +15,9 @@ import ( func NewMaligned() *goanalysis.Linter { const linterName = "maligned" var mu sync.Mutex - var res []result.Issue + var res []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, } return goanalysis.NewLinter( @@ -34,17 +34,17 @@ func NewMaligned() *goanalysis.Linter { return nil, nil } - issues := make([]result.Issue, 0, len(malignedIssues)) + issues := make([]goanalysis.Issue, 0, len(malignedIssues)) for _, i := range malignedIssues { text := fmt.Sprintf("struct of size %d bytes could be of size %d bytes", i.OldSize, i.NewSize) if lintCtx.Settings().Maligned.SuggestNewOrder { text += fmt.Sprintf(":\n%s", formatCodeBlock(i.NewStructDef, lintCtx.Cfg)) } - issues = append(issues, result.Issue{ + issues = append(issues, goanalysis.NewIssue(&result.Issue{ //nolint:scopelint Pos: i.Pos, Text: text, FromLinter: linterName, - }) + }, pass)) } mu.Lock() @@ -52,7 +52,7 @@ func NewMaligned() 
*goanalysis.Linter { mu.Unlock() return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return res }).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/misspell.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/misspell.go index bcc9a608b8c..6cd421e5e93 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/misspell.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/misspell.go @@ -56,11 +56,11 @@ const misspellName = "misspell" func NewMisspell() *goanalysis.Linter { var mu sync.Mutex - var resIssues []result.Issue + var resIssues []goanalysis.Issue var ruleErr error analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: misspellName, Doc: goanalysis.TheOnlyanalyzerDoc, } return goanalysis.NewLinter( @@ -106,13 +106,15 @@ func NewMisspell() *goanalysis.Linter { fileNames = append(fileNames, pos.Filename) } - var res []result.Issue + var res []goanalysis.Issue for _, f := range fileNames { issues, err := runMisspellOnFile(f, &r, lintCtx) if err != nil { return nil, err } - res = append(res, issues...) 
+ for i := range issues { + res = append(res, goanalysis.NewIssue(&issues[i], pass)) + } } if len(res) == 0 { return nil, nil @@ -124,7 +126,7 @@ func NewMisspell() *goanalysis.Linter { return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nakedret.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nakedret.go index 2309ca14b06..86735a51ada 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/nakedret.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/nakedret.go @@ -82,10 +82,10 @@ const nakedretName = "nakedret" func NewNakedret() *goanalysis.Linter { var mu sync.Mutex - var resIssues []result.Issue + var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: nakedretName, Doc: goanalysis.TheOnlyanalyzerDoc, } return goanalysis.NewLinter( @@ -95,14 +95,16 @@ func NewNakedret() *goanalysis.Linter { nil, ).WithContextSetter(func(lintCtx *linter.Context) { analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - var res []result.Issue + var res []goanalysis.Issue for _, file := range pass.Files { v := nakedretVisitor{ maxLength: lintCtx.Settings().Nakedret.MaxFuncLines, f: pass.Fset, } ast.Walk(&v, file) - res = append(res, v.issues...) 
+ for i := range v.issues { + res = append(res, goanalysis.NewIssue(&v.issues[i], pass)) + } } if len(res) == 0 { @@ -115,7 +117,7 @@ func NewNakedret() *goanalysis.Linter { return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/prealloc.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/prealloc.go index 49deaf46a39..168dc1713e9 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/prealloc.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/prealloc.go @@ -16,10 +16,10 @@ const preallocName = "prealloc" func NewPrealloc() *goanalysis.Linter { var mu sync.Mutex - var resIssues []result.Issue + var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: preallocName, Doc: goanalysis.TheOnlyanalyzerDoc, } return goanalysis.NewLinter( @@ -31,14 +31,14 @@ func NewPrealloc() *goanalysis.Linter { s := &lintCtx.Settings().Prealloc analyzer.Run = func(pass *analysis.Pass) (interface{}, error) { - var res []result.Issue + var res []goanalysis.Issue hints := prealloc.Check(pass.Files, s.Simple, s.RangeLoops, s.ForLoops) for _, hint := range hints { - res = append(res, result.Issue{ + res = append(res, goanalysis.NewIssue(&result.Issue{ //nolint:scopelint Pos: pass.Fset.Position(hint.Pos), Text: fmt.Sprintf("Consider preallocating %s", formatCode(hint.DeclaredSliceName, lintCtx.Cfg)), FromLinter: preallocName, - }) + }, pass)) } if len(res) == 0 { @@ -51,7 +51,7 @@ func NewPrealloc() *goanalysis.Linter { return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git 
a/vendor/github.com/golangci/golangci-lint/pkg/golinters/scopelint.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/scopelint.go index 1544d48a8b7..309ff270802 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/scopelint.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/scopelint.go @@ -17,10 +17,10 @@ const scopelintName = "scopelint" func NewScopelint() *goanalysis.Linter { var mu sync.Mutex - var resIssues []result.Issue + var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: scopelintName, Doc: goanalysis.TheOnlyanalyzerDoc, } return goanalysis.NewLinter( @@ -47,12 +47,14 @@ func NewScopelint() *goanalysis.Linter { } mu.Lock() - resIssues = append(resIssues, res...) + for i := range res { + resIssues = append(resIssues, goanalysis.NewIssue(&res[i], pass)) + } mu.Unlock() return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/structcheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/structcheck.go index a2291683570..8acfc403aa7 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/structcheck.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/structcheck.go @@ -15,9 +15,9 @@ import ( func NewStructcheck() *goanalysis.Linter { const linterName = "structcheck" var mu sync.Mutex - var res []result.Issue + var res []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, } return goanalysis.NewLinter( @@ -35,13 +35,13 @@ func NewStructcheck() *goanalysis.Linter { return nil, nil } - issues := make([]result.Issue, 0, len(structcheckIssues)) + issues := make([]goanalysis.Issue, 0, 
len(structcheckIssues)) for _, i := range structcheckIssues { - issues = append(issues, result.Issue{ + issues = append(issues, goanalysis.NewIssue(&result.Issue{ //nolint:scopelint Pos: i.Pos, Text: fmt.Sprintf("%s is unused", formatCode(i.FieldName, lintCtx.Cfg)), FromLinter: linterName, - }) + }, pass)) } mu.Lock() @@ -49,7 +49,7 @@ func NewStructcheck() *goanalysis.Linter { mu.Unlock() return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return res }).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/typecheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/typecheck.go index ece8e9acb16..436530a8dbe 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/typecheck.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/typecheck.go @@ -9,7 +9,7 @@ import ( func NewTypecheck() *goanalysis.Linter { const linterName = "typecheck" analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, Run: func(pass *analysis.Pass) (interface{}, error) { return nil, nil diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/unconvert.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/unconvert.go index 04b69d3268d..147570170e2 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/unconvert.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/unconvert.go @@ -14,9 +14,9 @@ import ( func NewUnconvert() *goanalysis.Linter { const linterName = "unconvert" var mu sync.Mutex - var res []result.Issue + var res []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, } return goanalysis.NewLinter( @@ -33,13 +33,13 @@ func NewUnconvert() *goanalysis.Linter { return nil, nil 
} - issues := make([]result.Issue, 0, len(positions)) + issues := make([]goanalysis.Issue, 0, len(positions)) for _, pos := range positions { - issues = append(issues, result.Issue{ + issues = append(issues, goanalysis.NewIssue(&result.Issue{ //nolint:scopelint Pos: pos, Text: "unnecessary conversion", FromLinter: linterName, - }) + }, pass)) } mu.Lock() @@ -47,7 +47,7 @@ func NewUnconvert() *goanalysis.Linter { mu.Unlock() return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return res }).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/unparam.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/unparam.go index d89168edac7..866d0663e63 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/unparam.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/unparam.go @@ -17,10 +17,10 @@ import ( func NewUnparam() *goanalysis.Linter { const linterName = "unparam" var mu sync.Mutex - var resIssues []result.Issue + var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, Requires: []*analysis.Analyzer{buildssa.Analyzer}, } @@ -56,13 +56,13 @@ func NewUnparam() *goanalysis.Linter { return nil, err } - var res []result.Issue + var res []goanalysis.Issue for _, i := range unparamIssues { - res = append(res, result.Issue{ + res = append(res, goanalysis.NewIssue(&result.Issue{ //nolint:scopelint Pos: pass.Fset.Position(i.Pos()), Text: i.Message(), FromLinter: linterName, - }) + }, pass)) } mu.Lock() @@ -71,7 +71,7 @@ func NewUnparam() *goanalysis.Linter { return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git 
a/vendor/github.com/golangci/golangci-lint/pkg/golinters/unused.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/unused.go index ab10a286a1b..8828ce9ba5f 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/unused.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/unused.go @@ -1,7 +1,10 @@ package golinters import ( + "go/types" + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/go/packages" "honnef.co/go/tools/unused" "github.com/golangci/golangci-lint/pkg/golinters/goanalysis" @@ -20,15 +23,22 @@ func NewUnused() *goanalysis.Linter { "Checks Go code for unused constants, variables, functions and types", analyzers, nil, - ).WithIssuesReporter(func(lintCtx *linter.Context) []result.Issue { - var issues []result.Issue + ).WithIssuesReporter(func(lintCtx *linter.Context) []goanalysis.Issue { + typesToPkg := map[*types.Package]*packages.Package{} + for _, pkg := range lintCtx.OriginalPackages { + typesToPkg[pkg.Types] = pkg + } + + var issues []goanalysis.Issue for _, ur := range u.Result() { p := u.ProblemObject(lintCtx.Packages[0].Fset, ur) - issues = append(issues, result.Issue{ + pkg := typesToPkg[ur.Pkg()] + issues = append(issues, goanalysis.NewIssue(&result.Issue{ //nolint:scopelint FromLinter: name, Text: p.Message, Pos: p.Pos, - }) + Pkg: pkg, + }, nil)) } return issues }).WithContextSetter(func(lintCtx *linter.Context) { diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/varcheck.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/varcheck.go index e0ff0d67f7c..3c650d8c9d3 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/varcheck.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/varcheck.go @@ -15,9 +15,9 @@ import ( func NewVarcheck() *goanalysis.Linter { const linterName = "varcheck" var mu sync.Mutex - var res []result.Issue + var res []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + 
Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, } return goanalysis.NewLinter( @@ -35,13 +35,13 @@ func NewVarcheck() *goanalysis.Linter { return nil, nil } - issues := make([]result.Issue, 0, len(varcheckIssues)) + issues := make([]goanalysis.Issue, 0, len(varcheckIssues)) for _, i := range varcheckIssues { - issues = append(issues, result.Issue{ + issues = append(issues, goanalysis.NewIssue(&result.Issue{ //nolint:scopelint Pos: i.Pos, Text: fmt.Sprintf("%s is unused", formatCode(i.VarName, lintCtx.Cfg)), FromLinter: linterName, - }) + }, pass)) } mu.Lock() @@ -49,7 +49,7 @@ func NewVarcheck() *goanalysis.Linter { mu.Unlock() return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return res }).WithLoadMode(goanalysis.LoadModeTypesInfo) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/whitespace.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/whitespace.go index 5064cef2b4f..4a2ccce5d64 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/whitespace.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/whitespace.go @@ -16,10 +16,10 @@ import ( func NewWhitespace() *goanalysis.Linter { const linterName = "whitespace" var mu sync.Mutex - var resIssues []result.Issue + var resIssues []goanalysis.Issue analyzer := &analysis.Analyzer{ - Name: goanalysis.TheOnlyAnalyzerName, + Name: linterName, Doc: goanalysis.TheOnlyanalyzerDoc, } return goanalysis.NewLinter( @@ -41,7 +41,7 @@ func NewWhitespace() *goanalysis.Linter { return nil, nil } - res := make([]result.Issue, len(issues)) + res := make([]goanalysis.Issue, len(issues)) for k, i := range issues { issue := result.Issue{ Pos: token.Position{ @@ -70,7 +70,7 @@ func NewWhitespace() *goanalysis.Linter { } issue.Replacement.NewLines = []string{bracketLine} - res[k] = issue + res[k] = goanalysis.NewIssue(&issue, pass) //nolint:scopelint } mu.Lock() @@ 
-79,7 +79,7 @@ func NewWhitespace() *goanalysis.Linter { return nil, nil } - }).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return resIssues }).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/golinters/wsl.go b/vendor/github.com/golangci/golangci-lint/pkg/golinters/wsl.go index 4dcbf55e8b0..174a673baec 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/golinters/wsl.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/golinters/wsl.go @@ -18,7 +18,7 @@ const ( // NewWSL returns a new WSL linter. func NewWSL() *goanalysis.Linter { var ( - issues []result.Issue + issues []goanalysis.Issue mu = sync.Mutex{} analyzer = &analysis.Analyzer{ Name: goanalysis.TheOnlyAnalyzerName, @@ -37,11 +37,13 @@ func NewWSL() *goanalysis.Linter { files = []string{} linterCfg = lintCtx.Cfg.LintersSettings.WSL processorCfg = wsl.Configuration{ - StrictAppend: linterCfg.StrictAppend, - AllowAssignAndCallCuddle: linterCfg.AllowAssignAndCallCuddle, - AllowMultiLineAssignCuddle: linterCfg.AllowMultiLineAssignCuddle, - AllowCuddleWithCalls: []string{"Lock", "RLock"}, - AllowCuddleWithRHS: []string{"Unlock", "RUnlock"}, + StrictAppend: linterCfg.StrictAppend, + AllowAssignAndCallCuddle: linterCfg.AllowAssignAndCallCuddle, + AllowMultiLineAssignCuddle: linterCfg.AllowMultiLineAssignCuddle, + AllowCaseTrailingWhitespace: linterCfg.AllowCaseTrailingWhitespace, + AllowCuddleDeclaration: linterCfg.AllowCuddleDeclaration, + AllowCuddleWithCalls: []string{"Lock", "RLock"}, + AllowCuddleWithRHS: []string{"Unlock", "RUnlock"}, } ) @@ -60,16 +62,16 @@ func NewWSL() *goanalysis.Linter { defer mu.Unlock() for _, err := range wslErrors { - issues = append(issues, result.Issue{ + issues = append(issues, goanalysis.NewIssue(&result.Issue{ //nolint:scopelint FromLinter: name, Pos: err.Position, Text: err.Reason, - }) + }, pass)) } return nil, nil } - 
}).WithIssuesReporter(func(*linter.Context) []result.Issue { + }).WithIssuesReporter(func(*linter.Context) []goanalysis.Issue { return issues }).WithLoadMode(goanalysis.LoadModeSyntax) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/manager.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/manager.go index 700552c83e1..f2cd2bd57ce 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/manager.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/lintersdb/manager.go @@ -76,6 +76,7 @@ func (m Manager) GetAllSupportedLinterConfigs() []*linter.Config { WithPresets(linter.PresetBugs). WithURL("https://github.com/kisielk/errcheck"), linter.NewConfig(golinters.NewGolint()). + WithLoadForGoAnalysis(). WithPresets(linter.PresetStyle). WithURL("https://github.com/golang/lint"), diff --git a/vendor/github.com/golangci/golangci-lint/pkg/lint/runner.go b/vendor/github.com/golangci/golangci-lint/pkg/lint/runner.go index fb7324ee071..62c97914e19 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/lint/runner.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/lint/runner.go @@ -112,13 +112,16 @@ func (r *Runner) runLinterSafe(ctx context.Context, lintCtx *linter.Context, specificLintCtx := *lintCtx specificLintCtx.Log = r.Log.Child(lc.Name()) + issues, err := lc.Linter.Run(ctx, &specificLintCtx) if err != nil { return nil, err } - for _, i := range issues { - i.FromLinter = lc.Name() + for i := range issues { + if issues[i].FromLinter == "" { + issues[i].FromLinter = lc.Name() + } } return issues, nil diff --git a/vendor/github.com/golangci/golangci-lint/pkg/logutils/log.go b/vendor/github.com/golangci/golangci-lint/pkg/logutils/log.go index 070887ccb78..b955417a87a 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/logutils/log.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/logutils/log.go @@ -14,18 +14,18 @@ type Log interface { type LogLevel int 
const ( - // debug message, write to debug logs only by logutils.Debug + // Debug messages, write to debug logs only by logutils.Debug. LogLevelDebug LogLevel = 0 - // information messages, don't write too much messages, - // only useful ones: they are shown when running with -v + // Information messages, don't write too much messages, + // only useful ones: they are shown when running with -v. LogLevelInfo LogLevel = 1 - // hidden errors: non critical errors: work can be continued, no need to fail whole program; + // Hidden errors: non critical errors: work can be continued, no need to fail whole program; // tests will crash if any warning occurred. LogLevelWarn LogLevel = 2 - // only not hidden from user errors: whole program failing, usually + // Only not hidden from user errors: whole program failing, usually // error logging happens in 1-2 places: in the "main" function. LogLevelError LogLevel = 3 ) diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/checkstyle.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/checkstyle.go index ca7a6ebc0a6..f36bc108adc 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/printers/checkstyle.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/checkstyle.go @@ -43,7 +43,8 @@ func (Checkstyle) Print(ctx context.Context, issues []result.Issue) error { files := map[string]*checkstyleFile{} - for _, issue := range issues { + for i := range issues { + issue := &issues[i] file, ok := files[issue.FilePath()] if !ok { file = &checkstyleFile{ diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/codeclimate.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/codeclimate.go index 8e184464b62..26878056884 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/printers/codeclimate.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/codeclimate.go @@ -32,7 +32,8 @@ func NewCodeClimate() *CodeClimate { func (p CodeClimate) Print(ctx 
context.Context, issues []result.Issue) error { allIssues := []CodeClimateIssue{} - for _, i := range issues { + for ind := range issues { + i := &issues[ind] var issue CodeClimateIssue issue.Description = i.FromLinter + ": " + i.Text issue.Location.Path = i.Pos.Filename diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/junitxml.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/junitxml.go index 46b14ef0779..b3d4280961c 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/printers/junitxml.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/junitxml.go @@ -41,7 +41,8 @@ func NewJunitXML() *JunitXML { func (JunitXML) Print(ctx context.Context, issues []result.Issue) error { suites := make(map[string]testSuiteXML) // use a map to group by file - for _, i := range issues { + for ind := range issues { + i := &issues[ind] suiteName := i.FilePath() testSuite := suites[suiteName] testSuite.Suite = i.FilePath() diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/tab.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/tab.go index 8d9974c8e03..d3cdce673dd 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/printers/tab.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/tab.go @@ -32,9 +32,8 @@ func (p Tab) SprintfColored(ca color.Attribute, format string, args ...interface func (p *Tab) Print(ctx context.Context, issues []result.Issue) error { w := tabwriter.NewWriter(logutils.StdOut, 0, 0, 2, ' ', 0) - for _, i := range issues { - i := i - p.printIssue(&i, w) + for i := range issues { + p.printIssue(&issues[i], w) } if err := w.Flush(); err != nil { diff --git a/vendor/github.com/golangci/golangci-lint/pkg/printers/text.go b/vendor/github.com/golangci/golangci-lint/pkg/printers/text.go index 772b58da399..28349920533 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/printers/text.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/printers/text.go 
@@ -37,16 +37,15 @@ func (p Text) SprintfColored(ca color.Attribute, format string, args ...interfac } func (p *Text) Print(ctx context.Context, issues []result.Issue) error { - for _, i := range issues { - i := i - p.printIssue(&i) + for i := range issues { + p.printIssue(&issues[i]) if !p.printIssuedLine { continue } - p.printSourceCode(&i) - p.printUnderLinePointer(&i) + p.printSourceCode(&issues[i]) + p.printUnderLinePointer(&issues[i]) } return nil diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/issue.go b/vendor/github.com/golangci/golangci-lint/pkg/result/issue.go index 7b1df59dd02..83ba705edfc 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/issue.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/issue.go @@ -2,6 +2,8 @@ package result import ( "go/token" + + "golang.org/x/tools/go/packages" ) type Range struct { @@ -23,18 +25,22 @@ type InlineFix struct { type Issue struct { FromLinter string Text string - Pos token.Position - - LineRange *Range `json:",omitempty"` - - // HunkPos is used only when golangci-lint is run over a diff - HunkPos int `json:",omitempty"` // Source lines of a code with the issue to show SourceLines []string // If we know how to fix the issue we can provide replacement lines Replacement *Replacement + + // Pkg is needed for proper caching of linting results + Pkg *packages.Package `json:"-"` + + LineRange *Range `json:",omitempty"` + + Pos token.Position + + // HunkPos is used only when golangci-lint is run over a diff + HunkPos int `json:",omitempty"` } func (i *Issue) FilePath() string { diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/fixer.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/fixer.go index 0ca027b8495..401c68dad12 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/fixer.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/fixer.go @@ -44,13 +44,14 @@ func (f 
Fixer) Process(issues []result.Issue) []result.Issue { outIssues := make([]result.Issue, 0, len(issues)) issuesToFixPerFile := map[string][]result.Issue{} - for _, issue := range issues { + for i := range issues { + issue := &issues[i] if issue.Replacement == nil { - outIssues = append(outIssues, issue) + outIssues = append(outIssues, *issue) continue } - issuesToFixPerFile[issue.FilePath()] = append(issuesToFixPerFile[issue.FilePath()], issue) + issuesToFixPerFile[issue.FilePath()] = append(issuesToFixPerFile[issue.FilePath()], *issue) } for file, issuesToFix := range issuesToFixPerFile { @@ -87,8 +88,9 @@ func (f Fixer) fixIssuesInFile(filePath string, issues []result.Issue) error { // merge multiple issues per line into one issue issuesPerLine := map[int][]result.Issue{} - for _, i := range issues { - issuesPerLine[i.Line()] = append(issuesPerLine[i.Line()], i) + for i := range issues { + issue := &issues[i] + issuesPerLine[issue.Line()] = append(issuesPerLine[issue.Line()], *issue) } issues = issues[:0] // reuse the same memory @@ -123,7 +125,8 @@ func (f Fixer) mergeLineIssues(lineNum int, lineIssues []result.Issue, origFileL } // check issues first - for _, i := range lineIssues { + for ind := range lineIssues { + i := &lineIssues[ind] if i.LineRange != nil { f.log.Infof("Line %d has multiple issues but at least one of them is ranged: %#v", lineNum, lineIssues) return &lineIssues[0] @@ -156,8 +159,8 @@ func (f Fixer) applyInlineFixes(lineIssues []result.Issue, origLine []byte, line // example: origLine="it's becouse of them", StartCol=5, Length=7, NewString="because" curOrigLinePos := 0 - for _, i := range lineIssues { - fix := i.Replacement.Inline + for i := range lineIssues { + fix := lineIssues[i].Replacement.Inline if fix.StartCol < curOrigLinePos { f.log.Warnf("Line %d has multiple intersecting issues: %#v", lineNum, lineIssues) return nil @@ -188,14 +191,15 @@ func (f Fixer) findNotIntersectingIssues(issues []result.Issue) []result.Issue { var ret 
[]result.Issue var currentEnd int - for _, issue := range issues { + for i := range issues { + issue := &issues[i] rng := issue.GetLineRange() if rng.From <= currentEnd { f.log.Infof("Skip issue %#v: intersects with end %d", issue, currentEnd) continue // skip intersecting issue } f.log.Infof("Fix issue %#v with range %v", issue, issue.GetLineRange()) - ret = append(ret, issue) + ret = append(ret, *issue) currentEnd = rng.To } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/utils.go b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/utils.go index 9d8ff9343dc..8bc3d847d65 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/result/processors/utils.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/result/processors/utils.go @@ -1,17 +1,16 @@ package processors import ( - "fmt" + "github.com/pkg/errors" "github.com/golangci/golangci-lint/pkg/result" ) func filterIssues(issues []result.Issue, filter func(i *result.Issue) bool) []result.Issue { retIssues := make([]result.Issue, 0, len(issues)) - for _, i := range issues { - i := i - if filter(&i) { - retIssues = append(retIssues, i) + for i := range issues { + if filter(&issues[i]) { + retIssues = append(retIssues, issues[i]) } } @@ -20,15 +19,14 @@ func filterIssues(issues []result.Issue, filter func(i *result.Issue) bool) []re func filterIssuesErr(issues []result.Issue, filter func(i *result.Issue) (bool, error)) ([]result.Issue, error) { retIssues := make([]result.Issue, 0, len(issues)) - for _, i := range issues { - i := i - ok, err := filter(&i) + for i := range issues { + ok, err := filter(&issues[i]) if err != nil { - return nil, fmt.Errorf("can't filter issue %#v: %s", i, err) + return nil, errors.Wrapf(err, "can't filter issue %#v", issues[i]) } if ok { - retIssues = append(retIssues, i) + retIssues = append(retIssues, issues[i]) } } @@ -37,9 +35,8 @@ func filterIssuesErr(issues []result.Issue, filter func(i *result.Issue) (bool, func 
transformIssues(issues []result.Issue, transform func(i *result.Issue) *result.Issue) []result.Issue { retIssues := make([]result.Issue, 0, len(issues)) - for _, i := range issues { - i := i - newI := transform(&i) + for i := range issues { + newI := transform(&issues[i]) if newI != nil { retIssues = append(retIssues, *newI) } diff --git a/vendor/github.com/golangci/golangci-lint/pkg/timeutils/stopwatch.go b/vendor/github.com/golangci/golangci-lint/pkg/timeutils/stopwatch.go index 78d1e3cdb0f..9628bd80f2b 100644 --- a/vendor/github.com/golangci/golangci-lint/pkg/timeutils/stopwatch.go +++ b/vendor/github.com/golangci/golangci-lint/pkg/timeutils/stopwatch.go @@ -10,6 +10,8 @@ import ( "github.com/golangci/golangci-lint/pkg/logutils" ) +const noStagesText = "no stages" + type Stopwatch struct { name string startedAt time.Time @@ -33,11 +35,7 @@ type stageDuration struct { d time.Duration } -func (s *Stopwatch) sprintStages() string { - if len(s.stages) == 0 { - return "no stages" - } - +func (s *Stopwatch) stageDurationsSorted() []stageDuration { stageDurations := []stageDuration{} for n, d := range s.stages { stageDurations = append(stageDurations, stageDuration{ @@ -48,6 +46,16 @@ func (s *Stopwatch) sprintStages() string { sort.Slice(stageDurations, func(i, j int) bool { return stageDurations[i].d > stageDurations[j].d }) + return stageDurations +} + +func (s *Stopwatch) sprintStages() string { + if len(s.stages) == 0 { + return noStagesText + } + + stageDurations := s.stageDurationsSorted() + stagesStrings := []string{} for _, s := range stageDurations { stagesStrings = append(stagesStrings, fmt.Sprintf("%s: %s", s.name, s.d)) @@ -56,6 +64,22 @@ func (s *Stopwatch) sprintStages() string { return fmt.Sprintf("stages: %s", strings.Join(stagesStrings, ", ")) } +func (s *Stopwatch) sprintTopStages(n int) string { + if len(s.stages) == 0 { + return noStagesText + } + + stageDurations := s.stageDurationsSorted() + + stagesStrings := []string{} + for i := 
0; i < len(stageDurations) && i < n; i++ { + s := stageDurations[i] + stagesStrings = append(stagesStrings, fmt.Sprintf("%s: %s", s.name, s.d)) + } + + return fmt.Sprintf("top %d stages: %s", n, strings.Join(stagesStrings, ", ")) +} + func (s *Stopwatch) Print() { p := fmt.Sprintf("%s took %s", s.name, time.Since(s.startedAt)) if len(s.stages) == 0 { @@ -74,6 +98,14 @@ func (s *Stopwatch) PrintStages() { s.log.Infof("%s took %s with %s", s.name, stagesDuration, s.sprintStages()) } +func (s *Stopwatch) PrintTopStages(n int) { + var stagesDuration time.Duration + for _, s := range s.stages { + stagesDuration += s + } + s.log.Infof("%s took %s with %s", s.name, stagesDuration, s.sprintTopStages(n)) +} + func (s *Stopwatch) TrackStage(name string, f func()) { startedAt := time.Now() f() diff --git a/vendor/github.com/golangci/lint-1/lint.go b/vendor/github.com/golangci/lint-1/lint.go index de63631a38d..886c85bf099 100644 --- a/vendor/github.com/golangci/lint-1/lint.go +++ b/vendor/github.com/golangci/lint-1/lint.go @@ -118,10 +118,12 @@ func (l *Linter) LintFiles(files map[string][]byte) ([]Problem, error) { // LintFiles lints a set of files of a single package. // The argument is a map of filename to source. -func (l *Linter) LintASTFiles(files []*ast.File, fset *token.FileSet) ([]Problem, error) { +func (l *Linter) LintPkg(files []*ast.File, fset *token.FileSet, typesPkg *types.Package, typesInfo *types.Info) ([]Problem, error) { pkg := &pkg{ - fset: fset, - files: make(map[string]*file), + fset: fset, + files: make(map[string]*file), + typesPkg: typesPkg, + typesInfo: typesInfo, } var pkgName string for _, f := range files { @@ -193,25 +195,6 @@ type pkg struct { } func (p *pkg) lint() []Problem { - if err := p.typeCheck(); err != nil { - /* TODO(dsymonds): Consider reporting these errors when golint operates on entire packages. 
- if e, ok := err.(types.Error); ok { - pos := p.fset.Position(e.Pos) - conf := 1.0 - if strings.Contains(e.Msg, "can't find import: ") { - // Golint is probably being run in a context that doesn't support - // typechecking (e.g. package files aren't found), so don't warn about it. - conf = 0 - } - if conf > 0 { - p.errorfAt(pos, conf, category("typechecking"), e.Msg) - } - - // TODO(dsymonds): Abort if !e.Soft? - } - */ - } - p.scanSortable() p.main = p.isMain() diff --git a/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go b/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go index d41c4e97e32..6e7a76e8c8b 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/asmdecl/asmdecl.go @@ -79,7 +79,6 @@ var ( asmArchArm = asmArch{name: "arm", bigEndian: false, stack: "R13", lr: true} asmArchArm64 = asmArch{name: "arm64", bigEndian: false, stack: "RSP", lr: true} asmArchAmd64 = asmArch{name: "amd64", bigEndian: false, stack: "SP", lr: false} - asmArchAmd64p32 = asmArch{name: "amd64p32", bigEndian: false, stack: "SP", lr: false} asmArchMips = asmArch{name: "mips", bigEndian: true, stack: "R29", lr: true} asmArchMipsLE = asmArch{name: "mipsle", bigEndian: false, stack: "R29", lr: true} asmArchMips64 = asmArch{name: "mips64", bigEndian: true, stack: "R29", lr: true} @@ -94,7 +93,6 @@ var ( &asmArchArm, &asmArchArm64, &asmArchAmd64, - &asmArchAmd64p32, &asmArchMips, &asmArchMipsLE, &asmArchMips64, @@ -635,9 +633,6 @@ func asmCheckVar(badf func(string, ...interface{}), fn *asmFunc, line, expr stri case "amd64.LEAQ": dst = 8 addr = true - case "amd64p32.LEAL": - dst = 4 - addr = true default: switch fn.arch.name { case "386", "amd64": diff --git a/vendor/golang.org/x/tools/go/analysis/passes/findcall/findcall.go b/vendor/golang.org/x/tools/go/analysis/passes/findcall/findcall.go index 2a4e3a47449..cd31ab416ff 100644 --- 
a/vendor/golang.org/x/tools/go/analysis/passes/findcall/findcall.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/findcall/findcall.go @@ -5,7 +5,10 @@ // The findcall package defines an Analyzer that serves as a trivial // example and test of the Analysis API. It reports a diagnostic for // every call to a function or method of the name specified by its -// -name flag. +// -name flag. It also exports a fact for each declaration that +// matches the name, plus a package-level fact if the package contained +// one or more such declarations. + package findcall import ( @@ -69,6 +72,10 @@ func run(pass *analysis.Pass) (interface{}, error) { } } + if len(pass.AllObjectFacts()) > 0 { + pass.ExportPackageFact(new(foundFact)) + } + return nil, nil } diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go index 98b3987b974..f8363d8faae 100644 --- a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go +++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go @@ -100,7 +100,7 @@ func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, // Write writes encoded type information for the specified package to out. // The FileSet provides file position information for named objects. func Write(out io.Writer, fset *token.FileSet, pkg *types.Package) error { - b, err := gcimporter.BExportData(fset, pkg) + b, err := gcimporter.IExportData(fset, pkg) if err != nil { return err } diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go index be671c79b70..5f00153f896 100644 --- a/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go @@ -6,8 +6,6 @@ // This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go; // see that file for specification of the format. 
-// +build go1.11 - package gcimporter import ( @@ -267,6 +265,11 @@ func (w *exportWriter) tag(tag byte) { } func (w *exportWriter) pos(pos token.Pos) { + if w.p.fset == nil { + w.int64(0) + return + } + p := w.p.fset.Position(pos) file := p.Filename line := int64(p.Line) @@ -394,7 +397,7 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { w.pos(f.Pos()) w.string(f.Name()) w.typ(f.Type(), pkg) - w.bool(f.Embedded()) + w.bool(f.Anonymous()) w.string(t.Tag(i)) // note (or tag) } diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go index a9c5733eba0..3af95f4a16a 100644 --- a/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go @@ -143,12 +143,20 @@ func IImportData(fset *token.FileSet, imports map[string]*types.Package, data [] p.pkgIndex[pkg] = nameIndex pkgList[i] = pkg } + if len(pkgList) == 0 { + errorf("no packages found for %s", path) + panic("unreachable") + } var localpkg *types.Package for _, pkg := range pkgList { if pkg.Path() == path { localpkg = pkg + break } } + if localpkg == nil { + localpkg = pkgList[0] + } names := make([]string, 0, len(p.pkgIndex[localpkg])) for name := range p.pkgIndex[localpkg] { diff --git a/vendor/golang.org/x/tools/go/ssa/func.go b/vendor/golang.org/x/tools/go/ssa/func.go index b21ff4e521e..0b99bc9ba16 100644 --- a/vendor/golang.org/x/tools/go/ssa/func.go +++ b/vendor/golang.org/x/tools/go/ssa/func.go @@ -257,6 +257,10 @@ func (f *Function) createSyntacticParams(recv *ast.FieldList, functype *ast.Func } } +type setNumable interface { + setNum(int) +} + // numberRegisters assigns numbers to all SSA registers // (value-defining Instructions) in f, to aid debugging. // (Non-Instruction Values are named at construction.) 
@@ -267,9 +271,7 @@ func numberRegisters(f *Function) { for _, instr := range b.Instrs { switch instr.(type) { case Value: - instr.(interface { - setNum(int) - }).setNum(v) + instr.(setNumable).setNum(v) v++ } } diff --git a/vendor/modules.txt b/vendor/modules.txt index 0cd5db71551..76ce9ce1c3e 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -21,7 +21,7 @@ github.com/apparentlymart/go-cidr/cidr github.com/apparentlymart/go-textseg/textseg # github.com/armon/go-radix v1.0.0 github.com/armon/go-radix -# github.com/aws/aws-sdk-go v1.25.10 +# github.com/aws/aws-sdk-go v1.25.22 github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/arn github.com/aws/aws-sdk-go/aws/awserr @@ -234,7 +234,7 @@ github.com/bflad/tfproviderlint/passes/schemaschema github.com/bgentry/go-netrc/netrc # github.com/bgentry/speakeasy v0.1.0 github.com/bgentry/speakeasy -# github.com/bombsimon/wsl v1.2.1 +# github.com/bombsimon/wsl v1.2.5 github.com/bombsimon/wsl # github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc github.com/boombuler/barcode @@ -278,6 +278,8 @@ github.com/gobwas/glob/syntax/ast github.com/gobwas/glob/syntax/lexer github.com/gobwas/glob/util/runes github.com/gobwas/glob/util/strings +# github.com/gofrs/flock v0.0.0-20190320160742-5135e617513b +github.com/gofrs/flock # github.com/gogo/protobuf v1.2.1 github.com/gogo/protobuf/proto github.com/gogo/protobuf/sortkeys @@ -312,7 +314,7 @@ github.com/golangci/gocyclo/pkg/gocyclo # github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a github.com/golangci/gofmt/gofmt github.com/golangci/gofmt/goimports -# github.com/golangci/golangci-lint v1.20.0 +# github.com/golangci/golangci-lint v1.21.0 github.com/golangci/golangci-lint/cmd/golangci-lint github.com/golangci/golangci-lint/internal/cache github.com/golangci/golangci-lint/internal/errorutil @@ -339,7 +341,7 @@ 
github.com/golangci/golangci-lint/pkg/result/processors github.com/golangci/golangci-lint/pkg/timeutils # github.com/golangci/ineffassign v0.0.0-20190609212857-42439a7714cc github.com/golangci/ineffassign -# github.com/golangci/lint-1 v0.0.0-20190930103755-fad67e08aa89 +# github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 github.com/golangci/lint-1 # github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca github.com/golangci/maligned @@ -663,7 +665,7 @@ golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm golang.org/x/text/width -# golang.org/x/tools v0.0.0-20190930201159-7c411dea38b0 +# golang.org/x/tools v0.0.0-20191010075000-0337d82405ff golang.org/x/tools/go/analysis golang.org/x/tools/go/analysis/internal/analysisflags golang.org/x/tools/go/analysis/internal/checker diff --git a/website/docs/d/subnet_ids.html.markdown b/website/docs/d/subnet_ids.html.markdown index 32f53f35700..7aab11df7be 100644 --- a/website/docs/d/subnet_ids.html.markdown +++ b/website/docs/d/subnet_ids.html.markdown @@ -26,7 +26,7 @@ data "aws_subnet" "example" { } output "subnet_cidr_blocks" { - value = ["${data.aws_subnet.example.*.cidr_block}"] + value = "${data.aws_subnet.example.*.cidr_block}" } ``` diff --git a/website/docs/r/elasticache_replication_group.html.markdown b/website/docs/r/elasticache_replication_group.html.markdown index a31e864199a..dc90ba6fdc6 100644 --- a/website/docs/r/elasticache_replication_group.html.markdown +++ b/website/docs/r/elasticache_replication_group.html.markdown @@ -108,6 +108,7 @@ The following arguments are supported: * `at_rest_encryption_enabled` - (Optional) Whether to enable encryption at rest. * `transit_encryption_enabled` - (Optional) Whether to enable encryption in transit. * `auth_token` - (Optional) The password used to access a password protected server. Can be specified only if `transit_encryption_enabled = true`. 
+* `kms_key_id` - (Optional) The ARN of the key that you wish to use if encrypting at rest. If not supplied, uses service managed encryption. Can be specified only if `at_rest_encryption_enabled = true`. * `engine_version` - (Optional) The version number of the cache engine to be used for the cache clusters in this replication group. * `parameter_group_name` - (Optional) The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used. * `port` – (Optional) The port number on which each of the cache nodes will accept connections. For Memcache the default is 11211, and for Redis the default port is 6379. diff --git a/website/docs/r/elasticsearch_domain.html.markdown b/website/docs/r/elasticsearch_domain.html.markdown index bab2fcb0c0a..2b054de2a59 100644 --- a/website/docs/r/elasticsearch_domain.html.markdown +++ b/website/docs/r/elasticsearch_domain.html.markdown @@ -147,7 +147,7 @@ resource "aws_security_group" "es" { protocol = "tcp" cidr_blocks = [ - "${data.aws_vpc.selected.cidr_blocks}", + "${data.aws_vpc.selected.cidr_block}", ] } } diff --git a/website/docs/r/glue_job.html.markdown b/website/docs/r/glue_job.html.markdown index 381882a16a2..43b1a8a96c4 100644 --- a/website/docs/r/glue_job.html.markdown +++ b/website/docs/r/glue_job.html.markdown @@ -54,6 +54,7 @@ be removed in future releases, please use `max_capacity` instead. * `default_arguments` – (Optional) The map of default arguments for this job. You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes. For information about how to specify and consume your own Job arguments, see the [Calling AWS Glue APIs in Python](http://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-calling.html) topic in the developer guide. 
For information about the key-value pairs that AWS Glue consumes to set up your job, see the [Special Parameters Used by AWS Glue](http://docs.aws.amazon.com/glue/latest/dg/aws-glue-programming-python-glue-arguments.html) topic in the developer guide. * `description` – (Optional) Description of the job. * `execution_property` – (Optional) Execution property of the job. Defined below. +* `glue_version` - (Optional) The version of Glue to use, for example "1.0". For information about available versions, see the [AWS Glue Release Notes](https://docs.aws.amazon.com/glue/latest/dg/release-notes.html). * `max_capacity` – (Optional) The maximum number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. * `max_retries` – (Optional) The maximum number of times to retry this job if it fails. * `name` – (Required) The name you assign to this job. It must be unique in your account. diff --git a/website/docs/r/storagegateway_smb_file_share.html.markdown b/website/docs/r/storagegateway_smb_file_share.html.markdown index f7beaf10d2a..f0d32c750cd 100644 --- a/website/docs/r/storagegateway_smb_file_share.html.markdown +++ b/website/docs/r/storagegateway_smb_file_share.html.markdown @@ -55,6 +55,7 @@ The following arguments are supported: * `read_only` - (Optional) Boolean to indicate write status of file share. File share does not accept writes if `true`. Defaults to `false`. * `requester_pays` - (Optional) Boolean who pays the cost of the request and the data download from the Amazon S3 bucket. Set this value to `true` if you want the requester to pay instead of the bucket owner. Defaults to `false`. * `valid_user_list` - (Optional) A list of users in the Active Directory that are allowed to access the file share. Only valid if `authentication` is set to `ActiveDirectory`. +* `tags` - (Optional) Key-value mapping of resource tags.