From 566ac279ab35c8d57ada3d6f0c331ccb02c11ae7 Mon Sep 17 00:00:00 2001
From: Sam Weaver
Date: Tue, 3 May 2022 15:41:17 -0400
Subject: [PATCH 001/123] d/aws_ecr_image: Add image URI attribute (#13671)

---
 internal/service/ecr/image_data_source.go  | 29 +++++++++++++++++++
 .../service/ecr/image_data_source_test.go  |  2 ++
 website/docs/d/ecr_image.html.markdown     |  1 +
 3 files changed, 32 insertions(+)

diff --git a/internal/service/ecr/image_data_source.go b/internal/service/ecr/image_data_source.go
index 38f0abd6397..15e751f33dd 100644
--- a/internal/service/ecr/image_data_source.go
+++ b/internal/service/ecr/image_data_source.go
@@ -47,6 +47,10 @@ func DataSourceImage() *schema.Resource {
 				Computed: true,
 				Elem:     &schema.Schema{Type: schema.TypeString},
 			},
+			"image_uri": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
 		},
 	}
 }
@@ -98,11 +102,36 @@ func dataSourceImageRead(d *schema.ResourceData, meta interface{}) error {
 
 	image := imageDetails[0]
 
+	params2 := &ecr.DescribeRepositoriesInput{
+		RepositoryNames: []*string{image.RepositoryName},
+		RegistryId:      image.RegistryId,
+	}
+
+	var repositoryDetails []*ecr.Repository
+	log.Printf("[DEBUG] Reading ECR Repositories: %s", params2)
+	err2 := conn.DescribeRepositoriesPages(params2, func(page *ecr.DescribeRepositoriesOutput, lastPage bool) bool {
+		repositoryDetails = append(repositoryDetails, page.Repositories...)
+		return true
+	})
+	if err2 != nil {
+		return fmt.Errorf("Error describing ECR repositories: %w", err2)
+	}
+
+	if len(repositoryDetails) == 0 {
+		return fmt.Errorf("No repository found")
+	}
+	if len(repositoryDetails) > 1 {
+		return fmt.Errorf("More than one repository found for image")
+	}
+
+	repository := repositoryDetails[0]
+
 	d.SetId(aws.StringValue(image.ImageDigest))
 	d.Set("registry_id", image.RegistryId)
 	d.Set("image_digest", image.ImageDigest)
 	d.Set("image_pushed_at", image.ImagePushedAt.Unix())
 	d.Set("image_size_in_bytes", image.ImageSizeInBytes)
+	d.Set("image_uri", aws.StringValue(repository.RepositoryUri)+"@"+aws.StringValue(image.ImageDigest))
 	if err := d.Set("image_tags", aws.StringValueSlice(image.ImageTags)); err != nil {
 		return fmt.Errorf("failed to set image_tags: %w", err)
 	}
diff --git a/internal/service/ecr/image_data_source_test.go b/internal/service/ecr/image_data_source_test.go
index c6f373f8672..31185df6661 100644
--- a/internal/service/ecr/image_data_source_test.go
+++ b/internal/service/ecr/image_data_source_test.go
@@ -27,9 +27,11 @@ func TestAccECRImageDataSource_ecrImage(t *testing.T) {
 					resource.TestCheckResourceAttrSet(resourceByTag, "image_digest"),
 					resource.TestCheckResourceAttrSet(resourceByTag, "image_pushed_at"),
 					resource.TestCheckResourceAttrSet(resourceByTag, "image_size_in_bytes"),
+					resource.TestCheckResourceAttrSet(resourceByTag, "image_uri"),
 					testCheckTagInImageTags(resourceByTag, tag),
 					resource.TestCheckResourceAttrSet(resourceByDigest, "image_pushed_at"),
 					resource.TestCheckResourceAttrSet(resourceByDigest, "image_size_in_bytes"),
+					resource.TestCheckResourceAttrSet(resourceByDigest, "image_uri"),
 					testCheckTagInImageTags(resourceByDigest, tag),
 				),
 			},
diff --git a/website/docs/d/ecr_image.html.markdown b/website/docs/d/ecr_image.html.markdown
index 65d6242bc57..b4918422290 100644
--- a/website/docs/d/ecr_image.html.markdown
+++ b/website/docs/d/ecr_image.html.markdown
@@ -36,3 +36,4 @@ In addition to all arguments above, the following attributes are exported:
 * `image_pushed_at` - The date and time, expressed as a unix timestamp, at which the current image was pushed to the 
repository. * `image_size_in_bytes` - The size, in bytes, of the image in the repository. * `image_tags` - The list of tags associated with this image. +* `image_uri` - The URI for the specific image version specified by `image_tag` or `image_digest`. From 0a3ba8daea5a78e83dbc62e741d6f9fa4072fd62 Mon Sep 17 00:00:00 2001 From: Sam Weaver Date: Tue, 3 May 2022 15:53:09 -0400 Subject: [PATCH 002/123] Add changelog entry for PR 24526. --- .changelog/24526.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/24526.txt diff --git a/.changelog/24526.txt b/.changelog/24526.txt new file mode 100644 index 00000000000..7b392ba8fb6 --- /dev/null +++ b/.changelog/24526.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +data-source/aws_ecr_image: Add image_uri attribute +``` \ No newline at end of file From 858e8783300a29408c361305856b1c33e041613f Mon Sep 17 00:00:00 2001 From: Tim Rogers Date: Tue, 16 May 2023 17:17:51 -0500 Subject: [PATCH 003/123] Modified target_group.go to persist stickiness.app_cookie.cookie_name through updates between lb_cookie and app_cookie --- internal/service/elbv2/target_group.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/internal/service/elbv2/target_group.go b/internal/service/elbv2/target_group.go index 68fdbaecb97..4c33692be4a 100644 --- a/internal/service/elbv2/target_group.go +++ b/internal/service/elbv2/target_group.go @@ -752,6 +752,10 @@ func resourceTargetGroupUpdate(ctx context.Context, d *schema.ResourceData, meta &elbv2.TargetGroupAttribute{ Key: aws.String("stickiness.lb_cookie.duration_seconds"), Value: aws.String(fmt.Sprintf("%d", stickiness["cookie_duration"].(int))), + }, + &elbv2.TargetGroupAttribute{ + Key: aws.String("stickiness.app_cookie.cookie_name"), + Value: aws.String(stickiness["cookie_name"].(string)), }) case "app_cookie": attrs = append(attrs, From 27d24c1fd0a2dfdac9366629eec73ac43024ccb5 Mon Sep 17 00:00:00 2001 From: Tim Rogers Date: Tue, 16 May 2023 17:30:12 -0500 Subject: [PATCH 004/123] Added acceptance test for changing ALB stickiness type --- internal/service/elbv2/target_group_test.go | 93 +++++++++++++++++++++ 1 file changed, 93 insertions(+) diff --git a/internal/service/elbv2/target_group_test.go b/internal/service/elbv2/target_group_test.go index 97f5a07c2a6..05f5e10435c 100644 --- a/internal/service/elbv2/target_group_test.go +++ b/internal/service/elbv2/target_group_test.go @@ -1450,6 +1450,99 @@ func TestAccELBV2TargetGroup_Stickiness_updateAppEnabled(t *testing.T) { }) } +func TestAccELBV2TargetGroup_Stickiness_updateStickinessType(t *testing.T) { + ctx := acctest.Context(t) + var conf elbv2.TargetGroup + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_lb_target_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, elbv2.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckTargetGroupDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccTargetGroupConfig_stickiness(rName, true, true), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTargetGroupExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttrSet(resourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "port", "443"), + resource.TestCheckResourceAttr(resourceName, "protocol", "HTTPS"), + resource.TestCheckResourceAttrSet(resourceName, "vpc_id"), + 
resource.TestCheckResourceAttr(resourceName, "deregistration_delay", "200"), + resource.TestCheckResourceAttr(resourceName, "stickiness.#", "1"), + resource.TestCheckResourceAttr(resourceName, "stickiness.0.enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "stickiness.0.type", "lb_cookie"), + resource.TestCheckResourceAttr(resourceName, "stickiness.0.cookie_duration", "10000"), + resource.TestCheckResourceAttr(resourceName, "health_check.#", "1"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.path", "/health2"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.interval", "30"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.port", "8082"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.protocol", "HTTPS"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.timeout", "4"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.healthy_threshold", "4"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.unhealthy_threshold", "4"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.matcher", "200"), + ), + }, + { + Config: testAccTargetGroupConfig_appStickiness(rName, true, true), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTargetGroupExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttrSet(resourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "port", "443"), + resource.TestCheckResourceAttr(resourceName, "protocol", "HTTPS"), + resource.TestCheckResourceAttrSet(resourceName, "vpc_id"), + resource.TestCheckResourceAttr(resourceName, "deregistration_delay", "200"), + resource.TestCheckResourceAttr(resourceName, "stickiness.#", "1"), + resource.TestCheckResourceAttr(resourceName, "stickiness.0.enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "stickiness.0.type", "app_cookie"), + resource.TestCheckResourceAttr(resourceName, "stickiness.0.cookie_name", "Cookie"), + resource.TestCheckResourceAttr(resourceName, "stickiness.0.cookie_duration", "10000"), + resource.TestCheckResourceAttr(resourceName, "health_check.#", "1"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.path", "/health2"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.interval", "30"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.port", "8082"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.protocol", "HTTPS"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.timeout", "4"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.healthy_threshold", "4"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.unhealthy_threshold", "4"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.matcher", "200"), + ), + }, + { + Config: testAccTargetGroupConfig_stickiness(rName, true, true), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTargetGroupExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttrSet(resourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "port", "443"), + resource.TestCheckResourceAttr(resourceName, "protocol", "HTTPS"), + resource.TestCheckResourceAttrSet(resourceName, "vpc_id"), + resource.TestCheckResourceAttr(resourceName, "deregistration_delay", "200"), + resource.TestCheckResourceAttr(resourceName, "stickiness.#", "1"), + 
resource.TestCheckResourceAttr(resourceName, "stickiness.0.enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "stickiness.0.type", "lb_cookie"), + resource.TestCheckResourceAttr(resourceName, "stickiness.0.cookie_name", "Cookie"), + resource.TestCheckResourceAttr(resourceName, "stickiness.0.cookie_duration", "10000"), + resource.TestCheckResourceAttr(resourceName, "health_check.#", "1"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.path", "/health2"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.interval", "30"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.port", "8082"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.protocol", "HTTPS"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.timeout", "4"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.healthy_threshold", "4"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.unhealthy_threshold", "4"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.matcher", "200"), + ), + }, + }, + }) +} + func TestAccELBV2TargetGroup_HealthCheck_update(t *testing.T) { ctx := acctest.Context(t) var conf elbv2.TargetGroup From 2655cd5f917dd108f47df755ecffc2aa200bf903 Mon Sep 17 00:00:00 2001 From: Tim Rogers Date: Tue, 16 May 2023 17:39:44 -0500 Subject: [PATCH 005/123] Added changelog file 31436.txt --- .changelog/31436.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/31436.txt diff --git a/.changelog/31436.txt b/.changelog/31436.txt new file mode 100644 index 00000000000..c36d19086b4 --- /dev/null +++ b/.changelog/31436.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_lb_target_group: Persist `stickiness.app_cookie.cookie_name` across changes between app_cookie and lb_cookie ALB stickiness +``` \ No newline at end of file From 3f1bc775819d3cc37e19cc528320deec09dc9fcf Mon Sep 17 00:00:00 2001 From: David Hwang Date: Fri, 8 Dec 2023 15:02:11 -0500 Subject: [PATCH 006/123] Adds kx-dataview resource --- internal/service/finspace/kx_dataview.go | 427 ++++++++++++++++++ internal/service/finspace/kx_dataview_test.go | 241 ++++++++++ .../service/finspace/service_package_gen.go | 8 + .../docs/r/finspace_kx_dataview.html.markdown | 91 ++++ 4 files changed, 767 insertions(+) create mode 100644 internal/service/finspace/kx_dataview.go create mode 100644 internal/service/finspace/kx_dataview_test.go create mode 100644 website/docs/r/finspace_kx_dataview.html.markdown diff --git a/internal/service/finspace/kx_dataview.go b/internal/service/finspace/kx_dataview.go new file mode 100644 index 00000000000..b18aaa8f7e8 --- /dev/null +++ b/internal/service/finspace/kx_dataview.go @@ -0,0 +1,427 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package finspace + +import ( + "context" + "errors" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/finspace" + "github.com/aws/aws-sdk-go-v2/service/finspace/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/flex" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/internal/verify" + "github.com/hashicorp/terraform-provider-aws/names" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +// @SDKResource("aws_finspace_kx_dataview", name="Kx Dataview") +// @Tags(identifierAttribute="arn") +func ResourceKxDataview() *schema.Resource { + + return &schema.Resource{ + CreateWithoutTimeout: resourceKxDataviewCreate, + ReadWithoutTimeout: resourceKxDataviewRead, + UpdateWithoutTimeout: resourceKxDataviewUpdate, + DeleteWithoutTimeout: resourceKxDataviewDelete, + + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Update: schema.DefaultTimeout(30 * time.Minute), + Delete: schema.DefaultTimeout(30 * time.Minute), + }, + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(3, 63), + }, + "environment_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(3, 63), + }, + "database_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(3, 63), + }, + "description": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 1000), + }, + "auto_update": { + Type: schema.TypeBool, + ForceNew: true, + Required: true, + }, + "changeset_id": { + Type: schema.TypeString, + Optional: true, + }, + "az_mode": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[types.KxAzMode](), + }, + "availability_zone_id": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, + "segment_configurations": { + Type: schema.TypeList, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "volume_name": { + Type: schema.TypeString, + Required: true, + }, + "db_paths": { + Type: schema.TypeList, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Required: true, + }, + }, + }, + Optional: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "created_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "last_modified_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + names.AttrTags: tftags.TagsSchema(), + names.AttrTagsAll: tftags.TagsSchemaComputed(), + }, + CustomizeDiff: verify.SetTagsDiff, + } +} + +const ( + ResNameKxDataview = "Kx Dataview" + kxDataviewIdPartCount = 3 +) + +func 
resourceKxDataviewCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).FinSpaceClient(ctx) + + idParts := []string{ + d.Get("environment_id").(string), + d.Get("database_name").(string), + d.Get("name").(string), + } + + rId, err := flex.FlattenResourceId(idParts, kxDataviewIdPartCount, false) + if err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionFlatteningResourceId, ResNameKxDataview, d.Get("name").(string), err)...) + } + d.SetId(rId) + + in := &finspace.CreateKxDataviewInput{ + DatabaseName: aws.String(d.Get("database_name").(string)), + DataviewName: aws.String(d.Get("name").(string)), + EnvironmentId: aws.String(d.Get("environment_id").(string)), + AutoUpdate: *aws.Bool(d.Get("auto_update").(bool)), + AzMode: types.KxAzMode(d.Get("az_mode").(string)), + ClientToken: aws.String(id.UniqueId()), + Tags: getTagsIn(ctx), + } + + if v, ok := d.GetOk("description"); ok { + in.Description = aws.String(v.(string)) + } + + if v, ok := d.GetOk("changeset_id"); ok { + in.ChangesetId = aws.String(v.(string)) + } + + if v, ok := d.GetOk("availability_zone_id"); ok { + in.AvailabilityZoneId = aws.String(v.(string)) + } + + if v, ok := d.GetOk("segment_configurations"); ok && len(v.([]interface{})) > 0 { + in.SegmentConfigurations = expandSegmentConfigurations(v.([]interface{})) + } + + out, err := conn.CreateKxDataview(ctx, in) + if err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionCreating, ResNameKxDataview, d.Get("name").(string), err)...) + } + if out == nil || out.DataviewName == nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionCreating, ResNameKxDataview, d.Get("name").(string), errors.New("empty output"))...) + } + if _, err := waitKxDataviewCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionWaitingForCreation, ResNameKxDataview, d.Get("name").(string), err)...) + } + + return append(diags, resourceKxDataviewRead(ctx, d, meta)...) +} + +func resourceKxDataviewRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).FinSpaceClient(ctx) + + out, err := findKxDataviewById(ctx, conn, d.Id()) + if !d.IsNewResource() && tfresource.NotFound(err) { + log.Printf("[WARN] FinSpace KxDataview (%s) not found, removing from state", d.Id()) + d.SetId("") + return diags + } + + if err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionReading, ResNameKxDataview, d.Id(), err)...) + } + d.Set("name", out.DataviewName) + d.Set("description", out.Description) + d.Set("auto_update", out.AutoUpdate) + d.Set("changeset_id", out.ChangesetId) + d.Set("availability_zone_id", out.AvailabilityZoneId) + d.Set("status", out.Status) + d.Set("created_timestamp", out.CreatedTimestamp.String()) + d.Set("last_modified_timestamp", out.LastModifiedTimestamp.String()) + d.Set("database_name", out.DatabaseName) + d.Set("environment_id", out.EnvironmentId) + d.Set("az_mode", out.AzMode) + if err := d.Set("segment_configurations", flattenSegmentConfigurations(out.SegmentConfigurations)); err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionReading, ResNameKxDataview, d.Id(), err)...) 
+ } + + return diags +} + +func resourceKxDataviewUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).FinSpaceClient(ctx) + in := &finspace.UpdateKxDataviewInput{ + EnvironmentId: aws.String(d.Get("environment_id").(string)), + DatabaseName: aws.String(d.Get("database_name").(string)), + DataviewName: aws.String(d.Get("name").(string)), + ClientToken: aws.String(id.UniqueId()), + } + + if v, ok := d.GetOk("changeset_id"); ok && d.HasChange("changeset_id") && d.Get("auto_update").(bool) != true { + in.ChangesetId = aws.String(v.(string)) + } + + if v, ok := d.GetOk("segment_configurations"); ok && len(v.([]interface{})) > 0 && d.HasChange("segment_configurations") { + in.SegmentConfigurations = expandSegmentConfigurations(v.([]interface{})) + } + + if _, err := conn.UpdateKxDataview(ctx, in); err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionUpdating, ResNameKxDataview, d.Get("name").(string), err)...) + } + + if _, err := waitKxDataviewUpdated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionWaitingForUpdate, ResNameKxDataview, d.Get("name").(string), err)...) + } + + return append(diags, resourceKxDataviewRead(ctx, d, meta)...) +} + +func resourceKxDataviewDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).FinSpaceClient(ctx) + + _, err := conn.DeleteKxDataview(ctx, &finspace.DeleteKxDataviewInput{ + EnvironmentId: aws.String(d.Get("environment_id").(string)), + DatabaseName: aws.String(d.Get("database_name").(string)), + DataviewName: aws.String(d.Get("name").(string)), + ClientToken: aws.String(id.UniqueId()), + }) + + if err != nil { + var nfe *types.ResourceNotFoundException + if errors.As(err, &nfe) { + return diags + } + return append(diags, create.DiagError(names.FinSpace, create.ErrActionDeleting, ResNameKxDataview, d.Get("name").(string), err)...) + } + + if _, err := waitKxDataviewDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil && !tfresource.NotFound(err) { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionWaitingForDeletion, ResNameKxDataview, d.Id(), err)...) 
+ } + return diags +} + +func findKxDataviewById(ctx context.Context, conn *finspace.Client, id string) (*finspace.GetKxDataviewOutput, error) { + idParts, err := flex.ExpandResourceId(id, kxDataviewIdPartCount, false) + if err != nil { + return nil, err + } + + in := &finspace.GetKxDataviewInput{ + EnvironmentId: aws.String(idParts[0]), + DatabaseName: aws.String(idParts[1]), + DataviewName: aws.String(idParts[2]), + } + + out, err := conn.GetKxDataview(ctx, in) + if err != nil { + var nfe *types.ResourceNotFoundException + + if errors.As(err, &nfe) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: in, + } + + } + return nil, err + } + + if out == nil || out.DataviewName == nil { + return nil, tfresource.NewEmptyResultError(in) + } + return out, nil +} + +func waitKxDataviewCreated(ctx context.Context, conn *finspace.Client, id string, timeout time.Duration) (*finspace.GetKxDataviewOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(types.KxDataviewStatusCreating), + Target: enum.Slice(types.KxDataviewStatusActive), + Refresh: statusKxDataview(ctx, conn, id), + Timeout: timeout, + NotFoundChecks: 20, + ContinuousTargetOccurence: 2, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*finspace.GetKxDataviewOutput); ok { + return out, err + } + return nil, err +} + +func waitKxDataviewUpdated(ctx context.Context, conn *finspace.Client, id string, timeout time.Duration) (*finspace.GetKxDataviewOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(types.KxDataviewStatusUpdating), + Target: enum.Slice(types.KxDataviewStatusActive), + Refresh: statusKxDataview(ctx, conn, id), + Timeout: timeout, + NotFoundChecks: 20, + ContinuousTargetOccurence: 2, + } + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if out, ok := outputRaw.(*finspace.GetKxDataviewOutput); ok { + return out, err + } + return nil, err +} + +func waitKxDataviewDeleted(ctx context.Context, conn *finspace.Client, id string, timeout time.Duration) (*finspace.GetKxDataviewOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(types.KxDataviewStatusDeleting), + Target: []string{}, + Refresh: statusKxDataview(ctx, conn, id), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*finspace.GetKxDataviewOutput); ok { + return out, err + } + + return nil, err +} + +func statusKxDataview(ctx context.Context, conn *finspace.Client, id string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + out, err := findKxDataviewById(ctx, conn, id) + if tfresource.NotFound(err) { + return nil, "", nil + } + if err != nil { + return nil, "", err + } + return out, string(out.Status), nil + } +} + +func expandDbPath(tfList []interface{}) []string { + if tfList == nil { + return nil + } + var s []string + + for _, v := range tfList { + s = append(s, v.(string)) + } + return s +} + +func expandSegmentConfigurations(tfList []interface{}) []types.KxDataviewSegmentConfiguration { + if tfList == nil { + return nil + } + var s []types.KxDataviewSegmentConfiguration + + for _, v := range tfList { + m := v.(map[string]interface{}) + s = append(s, types.KxDataviewSegmentConfiguration{ + VolumeName: aws.String(m["volume_name"].(string)), + DbPaths: expandDbPath(m["db_paths"].([]interface{})), + }) + } + + return s +} +func flattenSegmentConfiguration(apiObject *types.KxDataviewSegmentConfiguration) map[string]interface{} { + if apiObject == nil { + 
return nil + } + m := map[string]interface{}{} + if v := apiObject.VolumeName; aws.ToString(v) != "" { + m["volume_name"] = aws.ToString(v) + } + if v := apiObject.DbPaths; v != nil { + m["db_paths"] = v + } + return m +} + +func flattenSegmentConfigurations(apiObjects []types.KxDataviewSegmentConfiguration) []interface{} { + if apiObjects == nil { + return nil + } + var l []interface{} + for _, apiObject := range apiObjects { + l = append(l, flattenSegmentConfiguration(&apiObject)) + } + return l +} diff --git a/internal/service/finspace/kx_dataview_test.go b/internal/service/finspace/kx_dataview_test.go new file mode 100644 index 00000000000..0a26282997d --- /dev/null +++ b/internal/service/finspace/kx_dataview_test.go @@ -0,0 +1,241 @@ +package finspace_test + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/finspace" + "github.com/aws/aws-sdk-go-v2/service/finspace/types" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/create" + tffinspace "github.com/hashicorp/terraform-provider-aws/internal/service/finspace" + "github.com/hashicorp/terraform-provider-aws/names" + "testing" +) + +func TestAccFinSpaceKxDataview_basic(t *testing.T) { + if testing.Short() { + t.Skip("Skipping test in short mode.") + } + + ctx := acctest.Context(t) + var kxdataview finspace.GetKxDataviewOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_finspace_kx_dataview.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, finspace.ServiceID) + }, + ErrorCheck: acctest.ErrorCheck(t, finspace.ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckKxDataviewDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccKxDataviewConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckKxDataviewExists(ctx, resourceName, &kxdataview), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "status", string(types.KxDataviewStatusActive)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} +func TestAccFinSpaceKxDataview_disappears(t *testing.T) { + if testing.Short() { + t.Skip("Skipping test in short mode.") + } + + ctx := acctest.Context(t) + var kxdataview finspace.GetKxDataviewOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_finspace_kx_dataview.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, finspace.ServiceID) + }, + ErrorCheck: acctest.ErrorCheck(t, finspace.ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckKxDataviewDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccKxDataviewConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckKxDataviewExists(ctx, resourceName, &kxdataview), + acctest.CheckResourceDisappears(ctx, acctest.Provider, tffinspace.ResourceKxDataview(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + 
}, + }) +} + +func testAccKxDataviewConfigBase(rName string) string { + return fmt.Sprintf(` +resource "aws_kms_key" "test" { + deletion_window_in_days = 7 +} + +resource "aws_finspace_kx_environment" "test" { + name = %[1]q + kms_key_id = aws_kms_key.test.arn +} +resource "aws_finspace_kx_database" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id +} +`, rName) +} +func testAccKxDataviewConfig_basic(rName string) string { + return acctest.ConfigCompose( + testAccKxDataviewConfigBase(rName), + fmt.Sprintf(` +resource "aws_finspace_kx_dataview" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + database_name = aws_finspace_kx_database.test.name + auto_update = true + az_mode = "SINGLE" + availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] +} +`, rName)) +} + +func testAccCheckKxDataviewExists(ctx context.Context, name string, kxdataview *finspace.GetKxDataviewOutput) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxDataview, name, errors.New("not found")) + } + + if rs.Primary.ID == "" { + return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxDataview, name, errors.New("not set")) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) + resp, err := conn.GetKxDataview(ctx, &finspace.GetKxDataviewInput{ + DatabaseName: aws.String(rs.Primary.Attributes["database_name"]), + EnvironmentId: aws.String(rs.Primary.Attributes["environment_id"]), + DataviewName: aws.String(rs.Primary.Attributes["name"]), + }) + if err != nil { + return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxDataview, rs.Primary.ID, err) + } + + *kxdataview = *resp + + return nil + } +} + +func testAccCheckKxDataviewDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_finspace_kx_dataview" { + continue + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) + _, err := conn.GetKxDataview(ctx, &finspace.GetKxDataviewInput{ + DatabaseName: aws.String(rs.Primary.Attributes["database_name"]), + EnvironmentId: aws.String(rs.Primary.Attributes["environment_id"]), + DataviewName: aws.String(rs.Primary.Attributes["name"]), + }) + if err != nil { + var nfe *types.ResourceNotFoundException + if errors.As(err, &nfe) { + return nil + } + return err + } + return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxDataview, rs.Primary.ID, err) + } + return nil + } +} + +func testAccKxDataviewVolumeBase(rName string) string { + return fmt.Sprintf(` +resource "aws_finspace_kx_volume" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + availability_zones = [aws_finspace_kx_environment.test.availability_zones[0]] + az_mode = "SINGLE" + type = "NAS_1" + nas1_configuration { + type= "SSD_250" + size= 1200 + } +} +`, rName) +} + +func testAccKxDataviewConfig_withKxVolume(rName string) string { + return acctest.ConfigCompose( + testAccKxDataviewConfigBase(rName), + testAccKxDataviewVolumeBase(rName), + fmt.Sprintf(` +resource "aws_finspace_kx_dataview" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + database_name = aws_finspace_kx_database.test.name + auto_update = true + az_mode = 
"SINGLE" + availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] + + segment_configurations { + db_paths = ["/*"] + volume_name = aws_finspace_kx_volume.test.name + } +} +`, rName)) +} + +func TestAccFinSpaceKxDataview_withKxVolume(t *testing.T) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + ctx := acctest.Context(t) + + var kxdataview finspace.GetKxDataviewOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_finspace_kx_dataview.test" + + resource.ParallelTest(t, resource.TestCase{ + + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, finspace.ServiceID) + }, + ErrorCheck: acctest.ErrorCheck(t, finspace.ServiceID), + + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + + CheckDestroy: testAccCheckKxDataviewDestroy(ctx), + + Steps: []resource.TestStep{ + { + Config: testAccKxDataviewConfig_withKxVolume(rName), + + Check: resource.ComposeTestCheckFunc( + testAccCheckKxDataviewExists(ctx, resourceName, &kxdataview), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "status", string(types.KxDataviewStatusActive)), + ), + }, + }, + }) +} diff --git a/internal/service/finspace/service_package_gen.go b/internal/service/finspace/service_package_gen.go index 42b687b450e..b34d275706c 100644 --- a/internal/service/finspace/service_package_gen.go +++ b/internal/service/finspace/service_package_gen.go @@ -60,6 +60,14 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka IdentifierAttribute: "arn", }, }, + { + Factory: ResourceKxDataview, + TypeName: "aws_finspace_kx_dataview", + Name: "Kx Dataview", + Tags: &types.ServicePackageResourceTags{ + IdentifierAttribute: "arn", + }, + }, } } diff --git a/website/docs/r/finspace_kx_dataview.html.markdown b/website/docs/r/finspace_kx_dataview.html.markdown new file mode 100644 index 00000000000..70a2b762c90 --- /dev/null +++ b/website/docs/r/finspace_kx_dataview.html.markdown @@ -0,0 +1,91 @@ +--- +subcategory: "FinSpace" +layout: "aws" +page_title: "AWS: aws_finspace_kx_dataview" +description: |- + Terraform resource for managing an AWS FinSpace Kx Dataviewk. +--- + +# Resource: aws_finspace_dataview + +Terraform resource for managing an AWS FinSpace Kx Dataview. + +## Example Usage + +### Basic Usage + +```terraform +resource "aws_finspace_kx_dataview" "example" { + name = "my-tf-kx-dataview" + environment_id = aws_finspace_kx_environment.example.id + database_name = aws_finspace_kx_database.example.name + availability_zone_id = "use1-az2" + description = "Terraform managed Kx Dataview" + az_mode = "SINGLE" + auto_update = true + + segment_configurations { + volume_name = aws_finspace_kx_volume.example.name + db_paths = ["/*"] + } +} +``` + +## Argument Reference + +The following arguments are required: + +* `az_mode` - (Required) The number of availability zones you want to assign per cluster. This can be one of the following: + * SINGLE - Assigns one availability zone per cluster. + * MULTI - Assigns all the availability zones per cluster. +* `database_name` - (Required) The name of the database where you want to create a dataview. +* `environment_id` - (Required) Unique identifier for the KX environment. +* `name` - (Required) A unique identifier for the dataview. 
+
+The following arguments are optional:
+
+* `auto_update` - (Optional) The option to specify whether you want to apply all the future additions and corrections automatically to the dataview, when you ingest new changesets. The default value is false.
+* `availability_zone_id` - (Optional) The identifier of the availability zones. If attaching a volume, the volume must be in the same availability zone as the dataview that you are attaching to.
+* `changeset_id` - (Optional) A unique identifier of the changeset of the database that you want to use to ingest data.
+* `description` - (Optional) A description for the dataview.
+* `segment_configurations` - (Optional) The configuration that contains the database path of the data that you want to place on each selected volume. Each segment must have a unique database path for each volume. If you do not explicitly specify any database path for a volume, they are accessible from the cluster through the default S3/object store segment. See [segment_configurations](#segment_configurations).
+* `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
+
+### segment_configurations
+
+* `db_paths` - (Required) The database path of the data that you want to place on each selected volume. Each segment must have a unique database path for each volume.
+* `volume_name` - (Required) The name of the volume that you want to attach to a dataview. This volume must be in the same availability zone as the dataview that you are attaching to.
+
+## Attribute Reference
+
+This resource exports the following attributes in addition to the arguments above:
+
+* `arn` - Amazon Resource Name (ARN) identifier of the KX dataview.
+* `created_timestamp` - Timestamp at which the dataview was created in FinSpace. Value determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
+* `id` - A comma-delimited string joining environment ID, database name and dataview name.
+* `last_modified_timestamp` - The last time that the dataview was updated in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
+* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block).
+
+## Timeouts
+
+[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts):
+
+* `create` - (Default `30m`)
+* `update` - (Default `30m`)
+* `delete` - (Default `30m`)
+
+## Import
+
+In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import an AWS FinSpace Kx Dataview using the `id` (environment ID, database name and dataview name, comma-delimited). For example:
+
+```terraform
+import {
+  to = aws_finspace_kx_dataview.example
+  id = "n3ceo7wqxoxcti5tujqwzs,my-tf-kx-database,my-tf-kx-dataview"
+}
+```
+
+Using `terraform import`, import an AWS FinSpace Kx Dataview using the `id` (environment ID, database name and dataview name, comma-delimited). 
For example: + +```console +% terraform import aws_finspace_kx_dataview.example n3ceo7wqxoxcti5tujqwzs,my-tf-kx-database,my-tf-kx-dataview +``` From 3a043cadd6bebdd6cc5488b43dabce5404e0a2b2 Mon Sep 17 00:00:00 2001 From: Mayank Hirani Date: Fri, 8 Dec 2023 12:37:46 -0800 Subject: [PATCH 007/123] Push all kx volume changes for PR. --- internal/service/finspace/kx_volume.go | 504 ++++++++++++++++++ internal/service/finspace/kx_volume_test.go | 278 ++++++++++ .../service/finspace/service_package_gen.go | 8 + .../docs/r/finspace_kx_volume.html.markdown | 98 ++++ 4 files changed, 888 insertions(+) create mode 100644 internal/service/finspace/kx_volume.go create mode 100644 internal/service/finspace/kx_volume_test.go create mode 100644 website/docs/r/finspace_kx_volume.html.markdown diff --git a/internal/service/finspace/kx_volume.go b/internal/service/finspace/kx_volume.go new file mode 100644 index 00000000000..0edbc919ffc --- /dev/null +++ b/internal/service/finspace/kx_volume.go @@ -0,0 +1,504 @@ +// // Copyright (c) HashiCorp, Inc. +// // SPDX-License-Identifier: MPL-2.0 +package finspace + +import ( + "context" + "errors" + "log" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/finspace" + "github.com/aws/aws-sdk-go-v2/service/finspace/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/flex" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/internal/verify" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @SDKResource("aws_finspace_kx_volume", name="Kx Volume") +// @Tags(identifierAttribute="arn") +func ResourceKxVolume() *schema.Resource { + return &schema.Resource{ + CreateWithoutTimeout: resourceKxVolumeCreate, + ReadWithoutTimeout: resourceKxVolumeRead, + UpdateWithoutTimeout: resourceKxVolumeUpdate, + DeleteWithoutTimeout: resourceKxVolumeDelete, + + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Update: schema.DefaultTimeout(30 * time.Minute), + Delete: schema.DefaultTimeout(45 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + + "availability_zones": { + Type: schema.TypeList, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Required: true, + ForceNew: true, + }, + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "az_mode": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[types.KxAzMode](), + }, + "environment_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(1, 32), + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(3, 63), + }, + 
"type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[types.KxVolumeType](), + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(1, 1000), + }, + "nas1_configuration": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "size": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(1200, 33600), + }, + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[types.KxNAS1Type](), + }, + }, + }, + }, + "created_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "last_modified_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "status_reason": { + Type: schema.TypeString, + Computed: true, + }, + "attached_clusters": { + Type: schema.TypeList, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cluster_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(3, 63), + }, + "cluster_status": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[types.KxClusterStatus](), + }, + "cluster_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[types.KxClusterType](), + }, + }, + }, + Computed: true, + }, + names.AttrTags: tftags.TagsSchema(), + names.AttrTagsAll: tftags.TagsSchemaComputed(), + }, + CustomizeDiff: verify.SetTagsDiff, + } +} + +const ( + ResNameKxVolume = "Kx Volume" + kxVolumeIDPartCount = 2 +) + +func resourceKxVolumeCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) + + environmentId := d.Get("environment_id").(string) + volumeName := d.Get("name").(string) + idParts := []string{ + environmentId, + volumeName, + } + rID, err := flex.FlattenResourceId(idParts, kxVolumeIDPartCount, false) + if err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionFlatteningResourceId, ResNameKxVolume, d.Get("name").(string), err)...) + } + d.SetId(rID) + + in := &finspace.CreateKxVolumeInput{ + ClientToken: aws.String(id.UniqueId()), + AvailabilityZoneIds: flex.ExpandStringValueList(d.Get("availability_zones").([]interface{})), + EnvironmentId: aws.String(environmentId), + VolumeType: types.KxVolumeType(d.Get("type").(string)), + VolumeName: aws.String(volumeName), + AzMode: types.KxAzMode(d.Get("az_mode").(string)), + Tags: getTagsIn(ctx), + } + + if v, ok := d.GetOk("description"); ok { + in.Description = aws.String(v.(string)) + } + + if v, ok := d.GetOk("nas1_configuration"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + in.Nas1Configuration = expandNas1Configuration(v.([]interface{})) + } + + // TODO: add flatten/expand functions for remaining parameters + + out, err := conn.CreateKxVolume(ctx, in) + if err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionCreating, ResNameKxVolume, d.Get("name").(string), err)...) + } + + if out == nil || out.VolumeName == nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionCreating, ResNameKxVolume, d.Get("name").(string), errors.New("empty output"))...) 
+ } + + if _, err := waitKxVolumeCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionWaitingForCreation, ResNameKxVolume, d.Id(), err)...) + } + + // The CreateKxVolume API currently fails to tag the Volume when the + // Tags field is set. Until the API is fixed, tag after creation instead. + if err := createTags(ctx, conn, aws.ToString(out.VolumeArn), getTagsIn(ctx)); err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionCreating, ResNameKxVolume, d.Id(), err)...) + } + + return append(diags, resourceKxVolumeRead(ctx, d, meta)...) +} + +func resourceKxVolumeRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) + + out, err := findKxVolumeByID(ctx, conn, d.Id()) + + if !d.IsNewResource() && tfresource.NotFound(err) { + log.Printf("[WARN] FinSpace KxVolume (%s) not found, removing from state", d.Id()) + d.SetId("") + return diags + } + + if err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionReading, ResNameKxVolume, d.Id(), err)...) + } + + d.Set("arn", out.VolumeArn) + d.Set("name", out.VolumeName) + d.Set("description", out.Description) + d.Set("type", out.VolumeType) + d.Set("status", out.Status) + d.Set("status_reason", out.StatusReason) + d.Set("az_mode", out.AzMode) + d.Set("description", out.Description) + d.Set("created_timestamp", out.CreatedTimestamp.String()) + d.Set("last_modified_timestamp", out.LastModifiedTimestamp.String()) + d.Set("availability_zones", aws.StringSlice(out.AvailabilityZoneIds)) + + if err := d.Set("nas1_configuration", flattenNas1Configuration(out.Nas1Configuration)); err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionSetting, ResNameKxVolume, d.Id(), err)...) + } + + if err := d.Set("attached_clusters", flattenAttachedClusters(out.AttachedClusters)); err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionSetting, ResNameKxVolume, d.Id(), err)...) + } + + parts, err := flex.ExpandResourceId(d.Id(), kxVolumeIDPartCount, false) + if err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionSetting, ResNameKxVolume, d.Id(), err)...) + } + d.Set("environment_id", parts[0]) + + return diags +} + +func resourceKxVolumeUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) + + updateVolume := false + + in := &finspace.UpdateKxVolumeInput{ + EnvironmentId: aws.String(d.Get("environment_id").(string)), + VolumeName: aws.String(d.Get("name").(string)), + } + + if v, ok := d.GetOk("description"); ok && d.HasChanges("description") { + in.Description = aws.String(v.(string)) + updateVolume = true + } + + if v, ok := d.GetOk("nas1_configuration"); ok && len(v.([]interface{})) > 0 && d.HasChanges("nas1_configuration") { + in.Nas1Configuration = expandNas1Configuration(v.([]interface{})) + updateVolume = true + } + + if !updateVolume { + return diags + } + + log.Printf("[DEBUG] Updating FinSpace KxVolume (%s): %#v", d.Id(), in) + + if _, err := conn.UpdateKxVolume(ctx, in); err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionUpdating, ResNameKxVolume, d.Id(), err)...) 
+ } + if _, err := waitKxVolumeUpdated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionUpdating, ResNameKxVolume, d.Id(), err)...) + } + + return append(diags, resourceKxVolumeRead(ctx, d, meta)...) +} + +func resourceKxVolumeDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) + + log.Printf("[INFO] Deleting FinSpace Kx Volume: %s", d.Id()) + _, err := conn.DeleteKxVolume(ctx, &finspace.DeleteKxVolumeInput{ + VolumeName: aws.String(d.Get("name").(string)), + EnvironmentId: aws.String(d.Get("environment_id").(string)), + }) + + if err != nil { + var nfe *types.ResourceNotFoundException + if errors.As(err, &nfe) { + return diags + } + + return append(diags, create.DiagError(names.FinSpace, create.ErrActionDeleting, ResNameKxVolume, d.Id(), err)...) + } + + _, err = waitKxVolumeDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)) + if err != nil && !tfresource.NotFound(err) { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionWaitingForDeletion, ResNameKxVolume, d.Id(), err)...) + } + + return diags +} + +func waitKxVolumeCreated(ctx context.Context, conn *finspace.Client, id string, timeout time.Duration) (*finspace.GetKxVolumeOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(types.KxVolumeStatusCreating), + Target: enum.Slice(types.KxVolumeStatusActive), + Refresh: statusKxVolume(ctx, conn, id), + Timeout: timeout, + NotFoundChecks: 20, + ContinuousTargetOccurence: 2, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*finspace.GetKxVolumeOutput); ok { + return out, err + } + + return nil, err +} + +func waitKxVolumeUpdated(ctx context.Context, conn *finspace.Client, id string, timeout time.Duration) (*finspace.GetKxVolumeOutput, error) { //nolint:unparam + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(types.KxVolumeStatusCreating, types.KxVolumeStatusUpdating), + Target: enum.Slice(types.KxVolumeStatusActive), + Refresh: statusKxVolume(ctx, conn, id), + Timeout: timeout, + NotFoundChecks: 20, + ContinuousTargetOccurence: 2, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*finspace.GetKxVolumeOutput); ok { + return out, err + } + + return nil, err +} + +func waitKxVolumeDeleted(ctx context.Context, conn *finspace.Client, id string, timeout time.Duration) (*finspace.GetKxVolumeOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(types.KxVolumeStatusDeleting), + Target: enum.Slice(types.KxVolumeStatusDeleted), + Refresh: statusKxVolume(ctx, conn, id), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*finspace.GetKxVolumeOutput); ok { + return out, err + } + + return nil, err +} + +func statusKxVolume(ctx context.Context, conn *finspace.Client, id string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + out, err := findKxVolumeByID(ctx, conn, id) + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return out, string(out.Status), nil + } +} + +func findKxVolumeByID(ctx context.Context, conn *finspace.Client, id string) (*finspace.GetKxVolumeOutput, error) { + parts, err := flex.ExpandResourceId(id, kxVolumeIDPartCount, false) + if err != nil { + return nil, err + } 
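+	// The volume ID packs the environment ID and volume name together (kxVolumeIDPartCount parts),
+	// so both pieces are split back out here to look the volume up.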
+ + in := &finspace.GetKxVolumeInput{ + EnvironmentId: aws.String(parts[0]), + VolumeName: aws.String(parts[1]), + } + + out, err := conn.GetKxVolume(ctx, in) + if err != nil { + var nfe *types.ResourceNotFoundException + if errors.As(err, &nfe) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: in, + } + } + + return nil, err + } + + if out == nil || out.VolumeArn == nil { + return nil, tfresource.NewEmptyResultError(in) + } + + return out, nil +} + +func expandNas1Configuration(tfList []interface{}) *types.KxNAS1Configuration { + if len(tfList) == 0 || tfList[0] == nil { + return nil + } + + tfMap := tfList[0].(map[string]interface{}) + + a := &types.KxNAS1Configuration{} + + if v, ok := tfMap["size"].(int); ok && v != 0 { + a.Size = aws.Int32(int32(v)) + } + + if v, ok := tfMap["type"].(string); ok && v != "" { + a.Type = types.KxNAS1Type(v) + } + return a +} + +func flattenNas1Configuration(apiObject *types.KxNAS1Configuration) []interface{} { + if apiObject == nil { + return nil + } + + m := map[string]interface{}{} + + if v := apiObject.Size; v != nil { + m["size"] = aws.ToInt32(v) + } + + if v := apiObject.Type; v != "" { + m["type"] = v + } + + return []interface{}{m} +} + +func flattenCluster(apiObject *types.KxAttachedCluster) map[string]interface{} { + if apiObject == nil { + return nil + } + + m := map[string]interface{}{} + + if v := apiObject.ClusterName; aws.ToString(v) != "" { + m["cluster_name"] = aws.ToString(v) + } + + if v := apiObject.ClusterStatus; v != "" { + m["cluster_status"] = string(v) + } + + if v := apiObject.ClusterType; v != "" { + m["cluster_type"] = string(v) + } + + return m +} + +func flattenAttachedClusters(apiObjects []types.KxAttachedCluster) []interface{} { + if len(apiObjects) == 0 { + return nil + } + + var l []interface{} + + for _, apiObject := range apiObjects { + l = append(l, flattenCluster(&apiObject)) + } + + return l +} diff --git a/internal/service/finspace/kx_volume_test.go b/internal/service/finspace/kx_volume_test.go new file mode 100644 index 00000000000..520c918c1d0 --- /dev/null +++ b/internal/service/finspace/kx_volume_test.go @@ -0,0 +1,278 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package finspace_test + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/finspace" + "github.com/aws/aws-sdk-go-v2/service/finspace/types" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + tffinspace "github.com/hashicorp/terraform-provider-aws/internal/service/finspace" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccFinSpaceKxVolume_basic(t *testing.T) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + ctx := acctest.Context(t) + var KxVolume finspace.GetKxVolumeOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_finspace_kx_volume.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, finspace.ServiceID) + }, + ErrorCheck: acctest.ErrorCheck(t, finspace.ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckKxVolumeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccKxVolumeConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckKxVolumeExists(ctx, resourceName, &KxVolume), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "status", string(types.KxVolumeStatusActive)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccFinSpaceKxVolume_dissappears(t *testing.T) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + ctx := acctest.Context(t) + var KxVolume finspace.GetKxVolumeOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_finspace_kx_volume.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, finspace.ServiceID) + }, + ErrorCheck: acctest.ErrorCheck(t, finspace.ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckKxVolumeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccKxVolumeConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckKxVolumeExists(ctx, resourceName, &KxVolume), + acctest.CheckResourceDisappears(ctx, acctest.Provider, tffinspace.ResourceKxVolume(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccCheckKxVolumeDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_finspace_kx_volume" { + continue + } + + input := &finspace.GetKxVolumeInput{ + VolumeName: aws.String(rs.Primary.Attributes["name"]), + EnvironmentId: aws.String(rs.Primary.Attributes["environment_id"]), + } + _, err := conn.GetKxVolume(ctx, input) + if err != nil { + var nfe *types.ResourceNotFoundException + if errors.As(err, &nfe) { + return nil + } + return err + } 
+ + return create.Error(names.FinSpace, create.ErrActionCheckingDestroyed, tffinspace.ResNameKxVolume, rs.Primary.ID, errors.New("not destroyed")) + } + + return nil + } +} + +func testAccKxVolumeConfig_basic(rName string) string { + return acctest.ConfigCompose( + testAccKxVolumeConfigBase(rName), + fmt.Sprintf(` + resource "aws_finspace_kx_volume" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + availability_zones = [aws_finspace_kx_environment.test.availability_zones[0]] + az_mode = "SINGLE" + type = "NAS_1" + nas1_configuration { + type= "SSD_250" + size= 1200 + } + } + `, rName)) +} + +func testAccKxVolumeConfigBase(rName string) string { + return fmt.Sprintf(` + data "aws_caller_identity" "current" {} + data "aws_partition" "current" {} + + output "account_id" { + value = data.aws_caller_identity.current.account_id + } + + resource "aws_kms_key" "test" { + deletion_window_in_days = 7 + } + + resource "aws_finspace_kx_environment" "test" { + name = %[1]q + kms_key_id = aws_kms_key.test.arn + } + + data "aws_iam_policy_document" "key_policy" { + statement { + actions = [ + "kms:Decrypt", + "kms:GenerateDataKey" + ] + + resources = [ + aws_kms_key.test.arn, + ] + + principals { + type = "Service" + identifiers = ["finspace.amazonaws.com"] + } + + condition { + test = "ArnLike" + variable = "aws:SourceArn" + values = ["${aws_finspace_kx_environment.test.arn}/*"] + } + + condition { + test = "StringEquals" + variable = "aws:SourceAccount" + values = [data.aws_caller_identity.current.account_id] + } + } + + statement { + actions = [ + "kms:*", + ] + + resources = [ + "*", + ] + + principals { + type = "AWS" + identifiers = ["arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:root"] + } + } + } + + resource "aws_kms_key_policy" "test" { + key_id = aws_kms_key.test.id + policy = data.aws_iam_policy_document.key_policy.json + } + + resource "aws_vpc" "test" { + cidr_block = "172.31.0.0/16" + enable_dns_hostnames = true + } + + resource "aws_subnet" "test" { + vpc_id = aws_vpc.test.id + cidr_block = "172.31.32.0/20" + availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] + } + + resource "aws_security_group" "test" { + name = %[1]q + vpc_id = aws_vpc.test.id + + ingress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + } + + resource "aws_internet_gateway" "test" { + vpc_id = aws_vpc.test.id + } + + data "aws_route_tables" "rts" { + vpc_id = aws_vpc.test.id + } + + resource "aws_route" "r" { + route_table_id = tolist(data.aws_route_tables.rts.ids)[0] + destination_cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.test.id + } + `, rName) +} + +func testAccCheckKxVolumeExists(ctx context.Context, name string, KxVolume *finspace.GetKxVolumeOutput) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxVolume, name, errors.New("not found")) + } + + if rs.Primary.ID == "" { + return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxVolume, name, errors.New("not set")) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) + resp, err := conn.GetKxVolume(ctx, &finspace.GetKxVolumeInput{ + VolumeName: aws.String(rs.Primary.Attributes["name"]), + 
EnvironmentId: aws.String(rs.Primary.Attributes["environment_id"]), + }) + + if err != nil { + return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxVolume, rs.Primary.ID, err) + } + + *KxVolume = *resp + + return nil + } +} diff --git a/internal/service/finspace/service_package_gen.go b/internal/service/finspace/service_package_gen.go index 42b687b450e..9fb0005d844 100644 --- a/internal/service/finspace/service_package_gen.go +++ b/internal/service/finspace/service_package_gen.go @@ -60,6 +60,14 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka IdentifierAttribute: "arn", }, }, + { + Factory: ResourceKxVolume, + TypeName: "aws_finspace_kx_volume", + Name: "Kx Volume", + Tags: &types.ServicePackageResourceTags{ + IdentifierAttribute: "arn", + }, + }, } } diff --git a/website/docs/r/finspace_kx_volume.html.markdown b/website/docs/r/finspace_kx_volume.html.markdown new file mode 100644 index 00000000000..71e855f1bc4 --- /dev/null +++ b/website/docs/r/finspace_kx_volume.html.markdown @@ -0,0 +1,98 @@ +--- +subcategory: "FinSpace" +layout: "aws" +page_title: "AWS: aws_finspace_kx_volume" +description: |- + Terraform resource for managing an AWS FinSpace Kx Volume. +--- + +# Resource: aws_finspace_kx_volume + +Terraform resource for managing an AWS FinSpace Kx Volume. + +## Example Usage + +### Basic Usage + +```terraform +resource "aws_finspace_kx_volume" "example" { + name = "my-tf-kx-volume" + environment_id = aws_finspace_kx_environment.example.id + availability_zones = "use1-az2" + az_mode = "SINGLE" + type = "NAS_1" + nas1_configuration { + type= "SSD_250" + size= 1200 + } +} +``` + +## Argument Reference + +The following arguments are required: + +* `az_mode` - (Required) The number of availability zones you want to assign per volume. Currently, Finspace only support SINGLE for volumes. + * SINGLE - Assigns one availability zone per volume. +* `environment_id` - (Required) A unique identifier for the kdb environment, whose clusters can attach to the volume. +* `name` - (Required) Unique name for the volumr that you want to create. +* `type` - (Required) The type of file system volume. Currently, FinSpace only supports NAS_1 volume type. When you select NAS_1 volume type, you must also provide nas1Configuration. +* `availability_zones` - (Required) The identifier of the AWS Availability Zone IDs. + +The following arguments are optional: + +* `nas1_configuration` - (Optional) Specifies the configuration for the Network attached storage (NAS_1) file system volume. This parameter is required when you choose volumeType as NAS_1. +* `description` - (Optional) Description of the volume. +* `tags` - (Optional) A list of key-value pairs to label the volume. You can add up to 50 tags to a volume + + +### nas1_configuration + +The nas1_configuration block supports the following arguments: + +* `size` - (Required) The size of the network attached storage. +* `security_group_ids` - (Required) The type of the network attached storage. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `arn` - Amazon Resource Name (ARN) identifier of the KX volume. +* `created_timestamp` - The timestamp at which the volume was created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000. +* `status` - The status of volume creation. 
+ * CREATING – The volume creation is in progress. + * CREATE_FAILED – The volume creation has failed. + * ACTIVE – The volume is active. + * UPDATING – The volume is in the process of being updated. + * UPDATE_FAILED – The update action failed. + * UPDATED – The volume is successfully updated. + * DELETING – The volume is in the process of being deleted. + * DELETE_FAILED – The system failed to delete the volume. + * DELETED – The volume is successfully deleted. +* `status_reason` - The error message when a failed state occurs. +* `last_modified_timestamp` - Last timestamp at which the volume was updated in FinSpace. Value determined as epoch time in seconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000. + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `30m`) +* `update` - (Default `30m`) +* `delete` - (Default `45m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import an AWS FinSpace Kx Volume using the `id` (environment ID and volume name, comma-delimited). For example: + +```terraform +import { + to = aws_finspace_kx_volume.example + id = "n3ceo7wqxoxcti5tujqwzs,my-tf-kx-volume" +} +``` + +Using `terraform import`, import an AWS FinSpace Kx Volume using the `id` (environment ID and volume name, comma-delimited). For example: + +```console +% terraform import aws_finspace_kx_volume.example n3ceo7wqxoxcti5tujqwzs,my-tf-kx-volume +``` From c1f5965ebebadffc3eef1df7383210b01d04c0c8 Mon Sep 17 00:00:00 2001 From: Mayank Hirani Date: Fri, 8 Dec 2023 12:48:09 -0800 Subject: [PATCH 008/123] Add scaling group for hashicorp PR. --- internal/service/finspace/kx_scaling_group.go | 292 ++++++++++++++++++ .../service/finspace/kx_scaling_group_test.go | 273 ++++++++++++++++ .../service/finspace/service_package_gen.go | 16 + .../r/finspace_kx_scaling_group.html.markdown | 81 +++++ 4 files changed, 662 insertions(+) create mode 100644 internal/service/finspace/kx_scaling_group.go create mode 100644 internal/service/finspace/kx_scaling_group_test.go create mode 100644 website/docs/r/finspace_kx_scaling_group.html.markdown diff --git a/internal/service/finspace/kx_scaling_group.go b/internal/service/finspace/kx_scaling_group.go new file mode 100644 index 00000000000..db5b8e9713c --- /dev/null +++ b/internal/service/finspace/kx_scaling_group.go @@ -0,0 +1,292 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package finspace + +import ( + "context" + "errors" + "log" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/finspace" + "github.com/aws/aws-sdk-go-v2/service/finspace/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/flex" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/internal/verify" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @SDKResource("aws_finspace_kx_scaling_group", name="Kx Scaling Group") +// @Tags(identifierAttribute="arn") +func ResourceKxScalingGroup() *schema.Resource { + return &schema.Resource{ + CreateWithoutTimeout: resourceKxScalingGroupCreate, + ReadWithoutTimeout: resourceKxScalingGroupRead, + UpdateWithoutTimeout: resourceKxScalingGroupUpdate, + DeleteWithoutTimeout: resourceKxScalingGroupDelete, + + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(45 * time.Minute), + Update: schema.DefaultTimeout(30 * time.Minute), + Delete: schema.DefaultTimeout(60 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "availability_zone_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "environment_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(1, 32), + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(3, 63), + }, + "host_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(1, 32), + }, + "created_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "last_modified_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "clusters": { + Type: schema.TypeList, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "status_reason": { + Type: schema.TypeString, + Computed: true, + }, + names.AttrTags: tftags.TagsSchema(), + names.AttrTagsAll: tftags.TagsSchemaComputed(), + }, + CustomizeDiff: verify.SetTagsDiff, + } +} + +const ( + ResNameKxScalingGroup = "Kx Scaling Group" + kxScalingGroupIDPartCount = 2 +) + +func resourceKxScalingGroupCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).FinSpaceClient(ctx) + + environmentId := d.Get("environment_id").(string) + scalingGroupName := d.Get("name").(string) + idParts := []string{ + environmentId, + scalingGroupName, + } + rID, err := flex.FlattenResourceId(idParts, kxScalingGroupIDPartCount, false) + if err != nil { + return append(diags, create.DiagError(names.FinSpace, 
create.ErrActionFlatteningResourceId, ResNameKxScalingGroup, d.Get("name").(string), err)...) + } + d.SetId(rID) + + in := &finspace.CreateKxScalingGroupInput{ + EnvironmentId: aws.String(environmentId), + ScalingGroupName: aws.String(scalingGroupName), + HostType: aws.String(d.Get("host_type").(string)), + AvailabilityZoneId: aws.String(d.Get("availability_zone_id").(string)), + Tags: getTagsIn(ctx), + } + + out, err := conn.CreateKxScalingGroup(ctx, in) + if err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionCreating, ResNameKxScalingGroup, d.Get("name").(string), err)...) + } + + if out == nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionCreating, ResNameKxScalingGroup, d.Get("name").(string), errors.New("empty output"))...) + } + + if _, err := waitKxScalingGroupCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionWaitingForCreation, ResNameKxScalingGroup, d.Id(), err)...) + } + + return append(diags, resourceKxScalingGroupRead(ctx, d, meta)...) +} + +func waitKxScalingGroupCreated(ctx context.Context, conn *finspace.Client, id string, timeout time.Duration) (*finspace.GetKxScalingGroupOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(types.KxScalingGroupStatusCreating), + Target: enum.Slice(types.KxScalingGroupStatusActive), + Refresh: statusKxScalingGroup(ctx, conn, id), + Timeout: timeout, + NotFoundChecks: 20, + ContinuousTargetOccurence: 2, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*finspace.GetKxScalingGroupOutput); ok { + return out, err + } + + return nil, err +} + +func statusKxScalingGroup(ctx context.Context, conn *finspace.Client, id string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + out, err := findKxScalingGroupById(ctx, conn, id) + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return out, string(out.Status), nil + } +} + +func resourceKxScalingGroupRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).FinSpaceClient(ctx) + + out, err := findKxScalingGroupById(ctx, conn, d.Id()) + if !d.IsNewResource() && tfresource.NotFound(err) { + log.Printf("[WARN] FinSpace KxScalingGroup (%s) not found, removing from state", d.Id()) + d.SetId("") + return diags + } + + if err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionReading, ResNameKxScalingGroup, d.Id(), err)...) + } + d.Set("arn", out.ScalingGroupArn) + d.Set("status", out.Status) + d.Set("status_reason", out.StatusReason) + d.Set("created_timestamp", out.CreatedTimestamp.String()) + d.Set("last_modified_timestamp", out.LastModifiedTimestamp.String()) + d.Set("name", out.ScalingGroupName) + d.Set("availability_zone_id", out.AvailabilityZoneId) + d.Set("host_type", out.HostType) + d.Set("clusters", out.Clusters) + + parts, err := flex.ExpandResourceId(d.Id(), kxUserIDPartCount, false) + if err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionSetting, ResNameKxScalingGroup, d.Id(), err)...) + } + d.Set("environment_id", parts[0]) + + return diags +} + +func resourceKxScalingGroupUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + // Tags only. 
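+	// Every user-configurable attribute on this resource is ForceNew, so the only
+	// in-place change is to tags, which the provider's transparent tagging support
+	// (see the @Tags annotation above) applies automatically; refreshing state is
+	// all that is needed here.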
+ return append(diags, resourceKxScalingGroupRead(ctx, d, meta)...) +} + +func resourceKxScalingGroupDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).FinSpaceClient(ctx) + + log.Printf("[INFO] Deleting FinSpace KxScalingGroup %s", d.Id()) + _, err := conn.DeleteKxScalingGroup(ctx, &finspace.DeleteKxScalingGroupInput{ + ScalingGroupName: aws.String(d.Get("name").(string)), + EnvironmentId: aws.String(d.Get("environment_id").(string)), + }) + if err != nil { + var nfe *types.ResourceNotFoundException + if errors.As(err, &nfe) { + return diags + } + + return append(diags, create.DiagError(names.FinSpace, create.ErrActionDeleting, ResNameKxScalingGroup, d.Id(), err)...) + } + + _, err = waitKxScalingGroupDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)) + if err != nil && !tfresource.NotFound(err) { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionWaitingForDeletion, ResNameKxScalingGroup, d.Id(), err)...) + } + + return diags +} + +func findKxScalingGroupById(ctx context.Context, conn *finspace.Client, id string) (*finspace.GetKxScalingGroupOutput, error) { + parts, err := flex.ExpandResourceId(id, kxScalingGroupIDPartCount, false) + if err != nil { + return nil, err + } + in := &finspace.GetKxScalingGroupInput{ + EnvironmentId: aws.String(parts[0]), + ScalingGroupName: aws.String(parts[1]), + } + + out, err := conn.GetKxScalingGroup(ctx, in) + if err != nil { + var nfe *types.ResourceNotFoundException + if errors.As(err, &nfe) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: in, + } + } + + return nil, err + } + + if out == nil || out.ScalingGroupName == nil { + return nil, tfresource.NewEmptyResultError(in) + } + return out, nil +} + +func waitKxScalingGroupDeleted(ctx context.Context, conn *finspace.Client, id string, timeout time.Duration) (*finspace.GetKxScalingGroupOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(types.KxScalingGroupStatusDeleting), + Target: enum.Slice(types.KxScalingGroupStatusDeleted), + Refresh: statusKxScalingGroup(ctx, conn, id), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*finspace.GetKxScalingGroupOutput); ok { + return out, err + } + + return nil, err +} diff --git a/internal/service/finspace/kx_scaling_group_test.go b/internal/service/finspace/kx_scaling_group_test.go new file mode 100644 index 00000000000..3f5b3714a94 --- /dev/null +++ b/internal/service/finspace/kx_scaling_group_test.go @@ -0,0 +1,273 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package finspace_test + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/finspace" + "github.com/aws/aws-sdk-go-v2/service/finspace/types" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + tffinspace "github.com/hashicorp/terraform-provider-aws/internal/service/finspace" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccFinSpaceKxScalingGroup_basic(t *testing.T) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + ctx := acctest.Context(t) + var KxScalingGroup finspace.GetKxScalingGroupOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_finspace_kx_scaling_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, finspace.ServiceID) + }, + ErrorCheck: acctest.ErrorCheck(t, finspace.ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckKxScalingGroupDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccKxScalingGroupConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckKxScalingGroupExists(ctx, resourceName, &KxScalingGroup), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "status", string(types.KxScalingGroupStatusActive)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccFinSpaceKxScalingGroup_dissappears(t *testing.T) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + ctx := acctest.Context(t) + var KxScalingGroup finspace.GetKxScalingGroupOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_finspace_kx_scaling_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, finspace.ServiceID) + }, + ErrorCheck: acctest.ErrorCheck(t, finspace.ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckKxScalingGroupDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccKxScalingGroupConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckKxScalingGroupExists(ctx, resourceName, &KxScalingGroup), + acctest.CheckResourceDisappears(ctx, acctest.Provider, tffinspace.ResourceKxScalingGroup(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccCheckKxScalingGroupDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_finspace_kx_scaling_group" { + continue + } + + input := &finspace.GetKxScalingGroupInput{ + ScalingGroupName: aws.String(rs.Primary.Attributes["name"]), + EnvironmentId: aws.String(rs.Primary.Attributes["environment_id"]), + } + _, err := 
conn.GetKxScalingGroup(ctx, input) + if err != nil { + var nfe *types.ResourceNotFoundException + if errors.As(err, &nfe) { + return nil + } + return err + } + + return create.Error(names.FinSpace, create.ErrActionCheckingDestroyed, tffinspace.ResNameKxScalingGroup, rs.Primary.ID, errors.New("not destroyed")) + } + + return nil + } +} + +func testAccCheckKxScalingGroupExists(ctx context.Context, name string, KxScalingGroup *finspace.GetKxScalingGroupOutput) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxScalingGroup, name, errors.New("not found")) + } + + if rs.Primary.ID == "" { + return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxScalingGroup, name, errors.New("not set")) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) + resp, err := conn.GetKxScalingGroup(ctx, &finspace.GetKxScalingGroupInput{ + ScalingGroupName: aws.String(rs.Primary.Attributes["name"]), + EnvironmentId: aws.String(rs.Primary.Attributes["environment_id"]), + }) + + if err != nil { + return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxScalingGroup, rs.Primary.ID, err) + } + + *KxScalingGroup = *resp + + return nil + } +} + +func testAccKxScalingGroupConfigBase(rName string) string { + return fmt.Sprintf(` + data "aws_caller_identity" "current" {} + data "aws_partition" "current" {} + + output "account_id" { + value = data.aws_caller_identity.current.account_id + } + + resource "aws_kms_key" "test" { + deletion_window_in_days = 7 + } + + resource "aws_finspace_kx_environment" "test" { + name = %[1]q + kms_key_id = aws_kms_key.test.arn + } + + data "aws_iam_policy_document" "key_policy" { + statement { + actions = [ + "kms:Decrypt", + "kms:GenerateDataKey" + ] + + resources = [ + aws_kms_key.test.arn, + ] + + principals { + type = "Service" + identifiers = ["finspace.amazonaws.com"] + } + + condition { + test = "ArnLike" + variable = "aws:SourceArn" + values = ["${aws_finspace_kx_environment.test.arn}/*"] + } + + condition { + test = "StringEquals" + variable = "aws:SourceAccount" + values = [data.aws_caller_identity.current.account_id] + } + } + + statement { + actions = [ + "kms:*", + ] + + resources = [ + "*", + ] + + principals { + type = "AWS" + identifiers = ["arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:root"] + } + } + } + + resource "aws_kms_key_policy" "test" { + key_id = aws_kms_key.test.id + policy = data.aws_iam_policy_document.key_policy.json + } + + resource "aws_vpc" "test" { + cidr_block = "172.31.0.0/16" + enable_dns_hostnames = true + } + + resource "aws_subnet" "test" { + vpc_id = aws_vpc.test.id + cidr_block = "172.31.32.0/20" + availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] + } + + resource "aws_security_group" "test" { + name = %[1]q + vpc_id = aws_vpc.test.id + + ingress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + } + + resource "aws_internet_gateway" "test" { + vpc_id = aws_vpc.test.id + } + + data "aws_route_tables" "rts" { + vpc_id = aws_vpc.test.id + } + + resource "aws_route" "r" { + route_table_id = tolist(data.aws_route_tables.rts.ids)[0] + destination_cidr_block = "0.0.0.0/0" + gateway_id = 
aws_internet_gateway.test.id + } + `, rName) +} + +func testAccKxScalingGroupConfig_basic(rName string) string { + return acctest.ConfigCompose( + testAccKxScalingGroupConfigBase(rName), + fmt.Sprintf(` + resource "aws_finspace_kx_scaling_group" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] + host_type = "kx.sg.4xlarge" + } + `, rName)) +} diff --git a/internal/service/finspace/service_package_gen.go b/internal/service/finspace/service_package_gen.go index 42b687b450e..029ecf2b731 100644 --- a/internal/service/finspace/service_package_gen.go +++ b/internal/service/finspace/service_package_gen.go @@ -52,6 +52,14 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka IdentifierAttribute: "arn", }, }, + { + Factory: ResourceKxScalingGroup, + TypeName: "aws_finspace_kx_scaling_group", + Name: "Kx Scaling Group", + Tags: &types.ServicePackageResourceTags{ + IdentifierAttribute: "arn", + }, + }, { Factory: ResourceKxUser, TypeName: "aws_finspace_kx_user", @@ -60,6 +68,14 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka IdentifierAttribute: "arn", }, }, + { + Factory: ResourceKxVolume, + TypeName: "aws_finspace_kx_volume", + Name: "Kx Volume", + Tags: &types.ServicePackageResourceTags{ + IdentifierAttribute: "arn", + }, + }, } } diff --git a/website/docs/r/finspace_kx_scaling_group.html.markdown b/website/docs/r/finspace_kx_scaling_group.html.markdown new file mode 100644 index 00000000000..fa5717c5862 --- /dev/null +++ b/website/docs/r/finspace_kx_scaling_group.html.markdown @@ -0,0 +1,81 @@ +--- +subcategory: "FinSpace" +layout: "aws" +page_title: "AWS: aws_finspace_kx_scaling_group" +description: |- + Terraform resource for managing an AWS FinSpace Kx Scaling Group. +--- + +# Resource: aws_finspace_kx_scaling_group + +Terraform resource for managing an AWS FinSpace Kx Scaling Group. + +## Example Usage + +### Basic Usage + +```terraform +resource "aws_finspace_kx_scaling_group" "test" { + name = "my-tf-kx-scalinggroup" + environment_id = aws_finspace_kx_environment.example.id + availability_zone_id = "use1-az2" host_type = "kx.sg.4xlarge" +} +``` + +## Argument Reference + +The following arguments are required: + +* `availability_zone_id` - (Required) The availability zone identifiers for the requested regions. +* `environment_id` - (Required) A unique identifier for the kdb environment, where you want to create the scaling group. +* `name` - (Required) Unique name for the scaling group that you want to create. +* `host_type` - (Required) The memory and CPU capabilities of the scaling group host on which FinSpace Managed kdb clusters will be placed. + +The following arguments are optional: + +* `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. You can add up to 50 tags to a scaling group. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `arn` - Amazon Resource Name (ARN) identifier of the KX Scaling Group. +* `clusters` - The list of Managed kdb clusters that are currently active in the given scaling group. +* `created_timestamp` - The timestamp at which the scaling group was created in FinSpace. 
The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
+* `last_modified_timestamp` - Last timestamp at which the scaling group was updated in FinSpace. Value determined as epoch time in seconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000.
+* `status` - The status of scaling group.
+    * CREATING – The scaling group creation is in progress.
+    * CREATE_FAILED – The scaling group creation has failed.
+    * ACTIVE – The scaling group is active.
+    * UPDATING – The scaling group is in the process of being updated.
+    * UPDATE_FAILED – The update action failed.
+    * DELETING – The scaling group is in the process of being deleted.
+    * DELETE_FAILED – The system failed to delete the scaling group.
+    * DELETED – The scaling group is successfully deleted.
+* `status_reason` - The error message when a failed state occurs.
+* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block).
+
+## Timeouts
+
+[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts):
+
+* `create` - (Default `45m`)
+* `update` - (Default `30m`)
+* `delete` - (Default `60m`)
+
+## Import
+
+In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import an AWS FinSpace Kx scaling group using the `id` (environment ID and scaling group name, comma-delimited). For example:
+
+```terraform
+import {
+  to = aws_finspace_kx_scaling_group.example
+  id = "n3ceo7wqxoxcti5tujqwzs,my-tf-kx-scalinggroup"
+}
+```
+
+Using `terraform import`, import an AWS FinSpace Kx Scaling Group using the `id` (environment ID and scaling group name, comma-delimited). For example:
+
+```console
+% terraform import aws_finspace_kx_scaling_group.example n3ceo7wqxoxcti5tujqwzs,my-tf-kx-scalinggroup
+```
From 623c3ecdeb9e02f45a18223e929ea0dd1099a44d Mon Sep 17 00:00:00 2001
From: Mayank Hirani
Date: Fri, 8 Dec 2023 13:16:30 -0800
Subject: [PATCH 009/123] Cluster changes for Hashicorp PR.
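
Make capacity_configuration optional and add scaling_group_configuration,
tickerplant_log_configuration, database.dataview_name and
savedown_storage_configuration.volume_name to aws_finspace_kx_cluster, so
clusters can be placed on scaling groups and use dataviews and volumes.
A minimal scaling group block looks like the following (resource names are
illustrative placeholders only):

    scaling_group_configuration {
      scaling_group_name = aws_finspace_kx_scaling_group.example.name
      memory_reservation = 100
      node_count         = 1
    }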
--- internal/service/finspace/kx_cluster.go | 219 ++++++++++- internal/service/finspace/kx_cluster_test.go | 345 +++++++++++++++++- .../docs/r/finspace_kx_cluster.html.markdown | 22 +- .../r/finspace_kx_scaling_group.html.markdown | 81 ++++ 4 files changed, 639 insertions(+), 28 deletions(-) create mode 100644 website/docs/r/finspace_kx_scaling_group.html.markdown diff --git a/internal/service/finspace/kx_cluster.go b/internal/service/finspace/kx_cluster.go index 2346f196a56..6a6f26833e2 100644 --- a/internal/service/finspace/kx_cluster.go +++ b/internal/service/finspace/kx_cluster.go @@ -132,7 +132,7 @@ func ResourceKxCluster() *schema.Resource { }, "capacity_configuration": { Type: schema.TypeList, - Required: true, + Optional: true, ForceNew: true, MaxItems: 1, Elem: &schema.Resource{ @@ -225,6 +225,12 @@ func ResourceKxCluster() *schema.Resource { ForceNew: true, ValidateFunc: validation.StringLenBetween(3, 63), }, + "dataview_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(3, 63), + }, }, }, }, @@ -280,17 +286,23 @@ func ResourceKxCluster() *schema.Resource { Schema: map[string]*schema.Schema{ "type": { Type: schema.TypeString, - Required: true, + Optional: true, ForceNew: true, ValidateFunc: validation.StringInSlice( enum.Slice(types.KxSavedownStorageTypeSds01), true), }, "size": { Type: schema.TypeInt, - Required: true, + Optional: true, ForceNew: true, ValidateFunc: validation.IntBetween(10, 16000), }, + "volume_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(3, 63), + }, }, }, }, @@ -346,6 +358,64 @@ func ResourceKxCluster() *schema.Resource { }, }, }, + "scaling_group_configuration": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "scaling_group_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(3, 63), + }, + "cpu": { + Type: schema.TypeFloat, + Optional: true, + ForceNew: true, + ValidateFunc: validation.FloatAtLeast(0.1), + }, + "node_count": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + ValidateFunc: validation.IntAtLeast(1), + }, + "memory_limit": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + ValidateFunc: validation.IntAtLeast(6), + }, + "memory_reservation": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + ValidateFunc: validation.IntAtLeast(6), + }, + }, + }, + }, + "tickerplant_log_configuration": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "tickerplant_log_volumes": { + Type: schema.TypeSet, + Required: true, + ForceNew: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringLenBetween(3, 63), + }, + }, + }, + }, + }, }, CustomizeDiff: verify.SetTagsDiff, @@ -375,14 +445,13 @@ func resourceKxClusterCreate(ctx context.Context, d *schema.ResourceData, meta i d.SetId(rID) in := &finspace.CreateKxClusterInput{ - EnvironmentId: aws.String(environmentId), - ClusterName: aws.String(clusterName), - ClusterType: types.KxClusterType(d.Get("type").(string)), - ReleaseLabel: aws.String(d.Get("release_label").(string)), - AzMode: types.KxAzMode(d.Get("az_mode").(string)), - CapacityConfiguration: expandCapacityConfiguration(d.Get("capacity_configuration").([]interface{})), - ClientToken: aws.String(id.UniqueId()), - Tags: 
getTagsIn(ctx), + EnvironmentId: aws.String(environmentId), + ClusterName: aws.String(clusterName), + ClusterType: types.KxClusterType(d.Get("type").(string)), + ReleaseLabel: aws.String(d.Get("release_label").(string)), + AzMode: types.KxAzMode(d.Get("az_mode").(string)), + ClientToken: aws.String(id.UniqueId()), + Tags: getTagsIn(ctx), } if v, ok := d.GetOk("description"); ok { @@ -401,6 +470,10 @@ func resourceKxClusterCreate(ctx context.Context, d *schema.ResourceData, meta i in.AvailabilityZoneId = aws.String(v.(string)) } + if v, ok := d.GetOk("capacity_configuration"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + in.CapacityConfiguration = expandCapacityConfiguration(v.([]interface{})) + } + if v, ok := d.GetOk("command_line_arguments"); ok && len(v.(map[string]interface{})) > 0 { in.CommandLineArguments = expandCommandLineArguments(v.(map[string]interface{})) } @@ -429,6 +502,14 @@ func resourceKxClusterCreate(ctx context.Context, d *schema.ResourceData, meta i in.Code = expandCode(v.([]interface{})) } + if v, ok := d.GetOk("scaling_group_configuration"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + in.ScalingGroupConfiguration = expandScalingGroupConfiguration(v.([]interface{})) + } + + if v, ok := d.GetOk("tickerplant_log_configuration"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + in.TickerplantLogConfiguration = expandTickerplantLogConfiguration(v.([]interface{})) + } + out, err := conn.CreateKxCluster(ctx, in) if err != nil { return create.AppendDiagError(diags, names.FinSpace, create.ErrActionCreating, ResNameKxCluster, d.Get("name").(string), err) @@ -507,6 +588,14 @@ func resourceKxClusterRead(ctx context.Context, d *schema.ResourceData, meta int return create.AppendDiagError(diags, names.FinSpace, create.ErrActionSetting, ResNameKxCluster, d.Id(), err) } + if err := d.Set("scaling_group_configuration", flattenScalingGroupConfiguration(out.ScalingGroupConfiguration)); err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionSetting, ResNameKxCluster, d.Id(), err)...) + } + + if err := d.Set("tickerplant_log_configuration", flattenTickerplantLogConfiguration(out.TickerplantLogConfiguration)); err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionSetting, ResNameKxCluster, d.Id(), err)...) 
+ } + // compose cluster ARN using environment ARN parts, err := flex.ExpandResourceId(d.Id(), kxUserIDPartCount, false) if err != nil { @@ -767,6 +856,38 @@ func expandAutoScalingConfiguration(tfList []interface{}) *types.AutoScalingConf return a } +func expandScalingGroupConfiguration(tfList []interface{}) *types.KxScalingGroupConfiguration { + if len(tfList) == 0 || tfList[0] == nil { + return nil + } + + tfMap := tfList[0].(map[string]interface{}) + + a := &types.KxScalingGroupConfiguration{} + + if v, ok := tfMap["scaling_group_name"].(string); ok && v != "" { + a.ScalingGroupName = aws.String(v) + } + + if v, ok := tfMap["node_count"].(int); ok && v != 0 { + a.NodeCount = aws.Int32(int32(v)) + } + + if v, ok := tfMap["memory_limit"].(int); ok && v != 0 { + a.MemoryLimit = aws.Int32(int32(v)) + } + + if v, ok := tfMap["cpu"].(float64); ok && v != 0 { + a.Cpu = aws.Float64(v) + } + + if v, ok := tfMap["memory_reservation"].(int); ok && v != 0 { + a.MemoryReservation = aws.Int32(int32(v)) + } + + return a +} + func expandSavedownStorageConfiguration(tfList []interface{}) *types.KxSavedownStorageConfiguration { if len(tfList) == 0 || tfList[0] == nil { return nil @@ -784,6 +905,10 @@ func expandSavedownStorageConfiguration(tfList []interface{}) *types.KxSavedownS a.Size = aws.Int32(int32(v)) } + if v, ok := tfMap["volume_name"].(string); ok && v != "" { + a.VolumeName = aws.String(v) + } + return a } @@ -815,6 +940,22 @@ func expandVPCConfiguration(tfList []interface{}) *types.VpcConfiguration { return a } +func expandTickerplantLogConfiguration(tfList []interface{}) *types.TickerplantLogConfiguration { + if len(tfList) == 0 || tfList[0] == nil { + return nil + } + + tfMap := tfList[0].(map[string]interface{}) + + a := &types.TickerplantLogConfiguration{} + + if v, ok := tfMap["tickerplant_log_volumes"].(*schema.Set); ok && v.Len() > 0 { + a.TickerplantLogVolumes = flex.ExpandStringValueSet(v) + } + + return a +} + func expandCacheStorageConfiguration(tfMap map[string]interface{}) *types.KxCacheStorageConfiguration { if tfMap == nil { return nil @@ -896,6 +1037,10 @@ func expandDatabase(tfMap map[string]interface{}) *types.KxDatabaseConfiguration a.DatabaseName = aws.String(v) } + if v, ok := tfMap["dataview_name"].(string); ok && v != "" { + a.DataviewName = aws.String(v) + } + if v, ok := tfMap["cache_configurations"]; ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { a.CacheConfigurations = expandCacheConfigurations(v.([]interface{})) } @@ -1059,6 +1204,50 @@ func flattenAutoScalingConfiguration(apiObject *types.AutoScalingConfiguration) return []interface{}{m} } +func flattenScalingGroupConfiguration(apiObject *types.KxScalingGroupConfiguration) []interface{} { + if apiObject == nil { + return nil + } + + m := map[string]interface{}{} + + if v := apiObject.ScalingGroupName; v != nil { + m["scaling_group_name"] = aws.ToString(v) + } + + if v := apiObject.NodeCount; v != nil { + m["node_count"] = aws.ToInt32(v) + } + + if v := apiObject.MemoryLimit; v != nil { + m["memory_limit"] = aws.ToInt32(v) + } + + if v := apiObject.Cpu; v != nil { + m["cpu"] = aws.ToFloat64(v) + } + + if v := apiObject.MemoryReservation; v != nil { + m["memory_reservation"] = aws.ToInt32(v) + } + + return []interface{}{m} +} + +func flattenTickerplantLogConfiguration(apiObject *types.TickerplantLogConfiguration) []interface{} { + if apiObject == nil { + return nil + } + + m := map[string]interface{}{} + + if v := apiObject.TickerplantLogVolumes; v != nil { + m["tickerplant_log_volumes"] = v + } + 
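+	// A single nested configuration block is returned as a one-element list to
+	// match the schema.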
+ return []interface{}{m} +} + func flattenSavedownStorageConfiguration(apiObject *types.KxSavedownStorageConfiguration) []interface{} { if apiObject == nil { return nil @@ -1074,6 +1263,10 @@ func flattenSavedownStorageConfiguration(apiObject *types.KxSavedownStorageConfi m["size"] = v } + if v := apiObject.VolumeName; v != nil { + m["volume_name"] = aws.ToString(v) + } + return []interface{}{m} } @@ -1200,6 +1393,10 @@ func flattenDatabase(apiObject *types.KxDatabaseConfiguration) map[string]interf m["database_name"] = aws.ToString(v) } + if v := apiObject.DataviewName; v != nil { + m["dataview_name"] = aws.ToString(v) + } + if v := apiObject.CacheConfigurations; v != nil { m["cache_configurations"] = flattenCacheConfigurations(v) } diff --git a/internal/service/finspace/kx_cluster_test.go b/internal/service/finspace/kx_cluster_test.go index 6fc14e33133..3c5c2738a75 100644 --- a/internal/service/finspace/kx_cluster_test.go +++ b/internal/service/finspace/kx_cluster_test.go @@ -32,7 +32,7 @@ func testAccPreCheckManagedKxLicenseEnabled(t *testing.T) { } } -func TestAccFinSpaceKxCluster_basic(t *testing.T) { +func TestAccSKIPFinSpaceKxCluster_basic(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -69,7 +69,7 @@ func TestAccFinSpaceKxCluster_basic(t *testing.T) { }) } -func TestAccFinSpaceKxCluster_disappears(t *testing.T) { +func TestAccSKIPFinSpaceKxCluster_disappears(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -101,7 +101,7 @@ func TestAccFinSpaceKxCluster_disappears(t *testing.T) { }) } -func TestAccFinSpaceKxCluster_description(t *testing.T) { +func TestAccSKIPFinSpaceKxCluster_description(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -132,7 +132,7 @@ func TestAccFinSpaceKxCluster_description(t *testing.T) { }) } -func TestAccFinSpaceKxCluster_database(t *testing.T) { +func TestAccSKIPFinSpaceKxCluster_database(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -164,7 +164,7 @@ func TestAccFinSpaceKxCluster_database(t *testing.T) { }) } -func TestAccFinSpaceKxCluster_cacheConfigurations(t *testing.T) { +func TestAccSKIPFinSpaceKxCluster_cacheConfigurations(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -195,7 +195,7 @@ func TestAccFinSpaceKxCluster_cacheConfigurations(t *testing.T) { }) } -func TestAccFinSpaceKxCluster_cache250Configurations(t *testing.T) { +func TestAccSKIPFinSpaceKxCluster_cache250Configurations(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -233,7 +233,7 @@ func TestAccFinSpaceKxCluster_cache250Configurations(t *testing.T) { }) } -func TestAccFinSpaceKxCluster_cache12Configurations(t *testing.T) { +func TestAccSKIPFinSpaceKxCluster_cache12Configurations(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -271,7 +271,7 @@ func TestAccFinSpaceKxCluster_cache12Configurations(t *testing.T) { }) } -func TestAccFinSpaceKxCluster_code(t *testing.T) { +func TestAccSKIPFinSpaceKxCluster_code(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -319,7 +319,7 @@ func TestAccFinSpaceKxCluster_code(t *testing.T) { }) } -func TestAccFinSpaceKxCluster_multiAZ(t *testing.T) { +func TestAccSKIPFinSpaceKxCluster_multiAZ(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -350,7 +350,7 @@ func 
TestAccFinSpaceKxCluster_multiAZ(t *testing.T) { }) } -func TestAccFinSpaceKxCluster_rdb(t *testing.T) { +func TestAccSKIPFinSpaceKxCluster_rdb(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -381,7 +381,7 @@ func TestAccFinSpaceKxCluster_rdb(t *testing.T) { }) } -func TestAccFinSpaceKxCluster_executionRole(t *testing.T) { +func TestAccSKIPFinSpaceKxCluster_executionRole(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -412,7 +412,7 @@ func TestAccFinSpaceKxCluster_executionRole(t *testing.T) { }) } -func TestAccFinSpaceKxCluster_autoScaling(t *testing.T) { +func TestAccSKIPFinSpaceKxCluster_autoScaling(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -443,7 +443,7 @@ func TestAccFinSpaceKxCluster_autoScaling(t *testing.T) { }) } -func TestAccFinSpaceKxCluster_initializationScript(t *testing.T) { +func TestAccSKIPFinSpaceKxCluster_initializationScript(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -484,7 +484,7 @@ func TestAccFinSpaceKxCluster_initializationScript(t *testing.T) { }) } -func TestAccFinSpaceKxCluster_commandLineArgs(t *testing.T) { +func TestAccSKIPFinSpaceKxCluster_commandLineArgs(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -527,7 +527,7 @@ func TestAccFinSpaceKxCluster_commandLineArgs(t *testing.T) { }) } -func TestAccFinSpaceKxCluster_tags(t *testing.T) { +func TestAccSKIPFinSpaceKxCluster_tags(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -576,6 +576,134 @@ func TestAccFinSpaceKxCluster_tags(t *testing.T) { }) } +func TestAccSKIPFinSpaceKxCluster_ScalingGroup(t *testing.T) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + ctx := acctest.Context(t) + var kxcluster finspace.GetKxClusterOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_finspace_kx_cluster.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, finspace.ServiceID) + testAccPreCheckManagedKxLicenseEnabled(t) + }, + ErrorCheck: acctest.ErrorCheck(t, finspace.ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckKxClusterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccKxClusterConfig_ScalingGroup(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckKxClusterExists(ctx, resourceName, &kxcluster), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "status", string(types.KxClusterStatusRunning)), + ), + }, + }, + }) +} + +func TestAccSKIPFinSpaceKxRDBClusterInScalingGroup_withKxVolume(t *testing.T) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + ctx := acctest.Context(t) + var kxcluster finspace.GetKxClusterOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_finspace_kx_cluster.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, finspace.ServiceID) + testAccPreCheckManagedKxLicenseEnabled(t) + }, + ErrorCheck: acctest.ErrorCheck(t, finspace.ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckKxClusterDestroy(ctx), + Steps: 
[]resource.TestStep{ + { + Config: testAccKxRDBClusterConfigInScalingGroup_withKxVolume(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckKxClusterExists(ctx, resourceName, &kxcluster), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "status", string(types.KxClusterStatusRunning)), + ), + }, + }, + }) +} + +func TestAccFinSpaceKxTPClusterInScalingGroup_withKxVolume(t *testing.T) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + ctx := acctest.Context(t) + var kxcluster finspace.GetKxClusterOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_finspace_kx_cluster.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, finspace.ServiceID) + testAccPreCheckManagedKxLicenseEnabled(t) + }, + ErrorCheck: acctest.ErrorCheck(t, finspace.ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckKxClusterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccKxTPClusterConfigInScalingGroup_withKxVolume(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckKxClusterExists(ctx, resourceName, &kxcluster), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "status", string(types.KxClusterStatusRunning)), + ), + }, + }, + }) +} + +func TestAccFinSpaceKxClusterInScalingGroup_withKxDataview(t *testing.T) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + ctx := acctest.Context(t) + var kxcluster finspace.GetKxClusterOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_finspace_kx_cluster.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, finspace.ServiceID) + testAccPreCheckManagedKxLicenseEnabled(t) + }, + ErrorCheck: acctest.ErrorCheck(t, finspace.ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckKxClusterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccKxClusterConfigInScalingGroup_withKxDataview(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckKxClusterExists(ctx, resourceName, &kxcluster), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "status", string(types.KxClusterStatusRunning)), + ), + }, + }, + }) +} + func testAccCheckKxClusterDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) @@ -746,6 +874,50 @@ resource "aws_route" "r" { `, rName) } +func testAccKxClusterConfigScalingGroupBase(rName string) string { + return fmt.Sprintf(` + resource "aws_finspace_kx_scaling_group" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] + host_type = "kx.sg.4xlarge" + } + `, rName) +} + +func testAccKxClusterConfigKxVolumeBase(rName string) string { + return fmt.Sprintf(` + resource "aws_finspace_kx_volume" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + availability_zones = [aws_finspace_kx_environment.test.availability_zones[0]] + az_mode = "SINGLE" + type = "NAS_1" + nas1_configuration { + type= "SSD_1000" 
+ size= 1200 + } + } + `, rName) +} + +func testAccKxClusterConfigKxDataviewBase(rName string) string { + return fmt.Sprintf(` +resource "aws_finspace_kx_database" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id +} + +resource "aws_finspace_kx_dataview" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + database_name = aws_finspace_kx_database.test.name + auto_update = true + az_mode = "SINGLE" + availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] +} +`, rName) +} func testAccKxClusterConfig_basic(rName string) string { return acctest.ConfigCompose( testAccKxClusterConfigBase(rName), @@ -772,6 +944,149 @@ resource "aws_finspace_kx_cluster" "test" { `, rName)) } +func testAccKxClusterConfig_ScalingGroup(rName string) string { + return acctest.ConfigCompose( + testAccKxClusterConfigBase(rName), + testAccKxClusterConfigScalingGroupBase(rName), + fmt.Sprintf(` +resource "aws_finspace_kx_cluster" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + type = "HDB" + release_label = "1.0" + az_mode = "SINGLE" + availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] + vpc_configuration { + vpc_id = aws_vpc.test.id + security_group_ids = [aws_security_group.test.id] + subnet_ids = [aws_subnet.test.id] + ip_address_type = "IP_V4" + } + scaling_group_configuration { + scaling_group_name = aws_finspace_kx_scaling_group.test.name + memory_limit = 200 + memory_reservation = 100 + node_count = 1 + cpu = 0.5 + } +} +`, rName)) +} + +func testAccKxRDBClusterConfigInScalingGroup_withKxVolume(rName string) string { + return acctest.ConfigCompose( + testAccKxClusterConfigBase(rName), + testAccKxClusterConfigKxVolumeBase(rName), + testAccKxClusterConfigScalingGroupBase(rName), + fmt.Sprintf(` +resource "aws_finspace_kx_database" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + } + +resource "aws_finspace_kx_cluster" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + type = "RDB" + release_label = "1.0" + az_mode = "SINGLE" + availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] + vpc_configuration { + vpc_id = aws_vpc.test.id + security_group_ids = [aws_security_group.test.id] + subnet_ids = [aws_subnet.test.id] + ip_address_type = "IP_V4" + } + scaling_group_configuration { + scaling_group_name = aws_finspace_kx_scaling_group.test.name + memory_limit = 200 + memory_reservation = 100 + node_count = 1 + cpu = 0.5 + } + database { + database_name = aws_finspace_kx_database.test.name + } + savedown_storage_configuration { + volume_name = aws_finspace_kx_volume.test.name + } +} +`, rName)) +} + +func testAccKxTPClusterConfigInScalingGroup_withKxVolume(rName string) string { + return acctest.ConfigCompose( + testAccKxClusterConfigBase(rName), + testAccKxClusterConfigKxVolumeBase(rName), + testAccKxClusterConfigScalingGroupBase(rName), + fmt.Sprintf(` +resource "aws_finspace_kx_cluster" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + type = "TICKERPLANT" + release_label = "1.0" + az_mode = "SINGLE" + availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] + vpc_configuration { + vpc_id = aws_vpc.test.id + security_group_ids = [aws_security_group.test.id] + subnet_ids = [aws_subnet.test.id] + ip_address_type = "IP_V4" + } + scaling_group_configuration { + scaling_group_name = aws_finspace_kx_scaling_group.test.name + memory_limit = 200 + 
memory_reservation = 100 + node_count = 1 + cpu = 0.5 + } + tickerplant_log_configuration { + tickerplant_log_volumes = [aws_finspace_kx_volume.test.name] + } +} +`, rName)) +} + +func testAccKxClusterConfigInScalingGroup_withKxDataview(rName string) string { + return acctest.ConfigCompose( + testAccKxClusterConfigBase(rName), + testAccKxClusterConfigScalingGroupBase(rName), + testAccKxClusterConfigKxDataviewBase(rName), + fmt.Sprintf(` +resource "aws_finspace_kx_cluster" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + type = "HDB" + release_label = "1.0" + az_mode = "SINGLE" + availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] + vpc_configuration { + vpc_id = aws_vpc.test.id + security_group_ids = [aws_security_group.test.id] + subnet_ids = [aws_subnet.test.id] + ip_address_type = "IP_V4" + } + + scaling_group_configuration { + scaling_group_name = aws_finspace_kx_scaling_group.test.name + memory_limit = 200 + memory_reservation = 100 + node_count = 1 + cpu = 0.5 + } + + database { + database_name = aws_finspace_kx_database.test.name + dataview_name = aws_finspace_kx_dataview.test.name + } + + lifecycle { + ignore_changes = [database] + } +} +`, rName)) +} + func testAccKxClusterConfig_description(rName, description string) string { return acctest.ConfigCompose( testAccKxClusterConfigBase(rName), diff --git a/website/docs/r/finspace_kx_cluster.html.markdown b/website/docs/r/finspace_kx_cluster.html.markdown index 52ed4105d4a..f7d59e6d71a 100644 --- a/website/docs/r/finspace_kx_cluster.html.markdown +++ b/website/docs/r/finspace_kx_cluster.html.markdown @@ -92,6 +92,8 @@ The following arguments are optional: * `execution_role` - (Optional) An IAM role that defines a set of permissions associated with a cluster. These permissions are assumed when a cluster attempts to access another cluster. * `initialization_script` - (Optional) Path to Q program that will be run at launch of a cluster. This is a relative path within .zip file that contains the custom code, which will be loaded on the cluster. It must include the file name itself. For example, somedir/init.q. * `savedown_storage_configuration` - (Optional) Size and type of the temporary storage that is used to hold data during the savedown process. This parameter is required when you choose `type` as RDB. All the data written to this storage space is lost when the cluster node is restarted. See [savedown_storage_configuration](#savedown_storage_configuration). +* `scaling_group_configuration` - (Optional) The structure that stores the configuration details of a scaling group. +* `tickerplant_log_configuration` - A configuration to store Tickerplant logs. It consists of a list of volumes that will be mounted to your cluster. For the cluster type Tickerplant , the location of the TP volume on the cluster will be available by using the global variable .aws.tp_log_path. * `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ### auto_scaling_configuration @@ -149,6 +151,7 @@ The database block supports the following arguments: * `database_name` - (Required) Name of the KX database. * `cache_configurations` - (Optional) Configuration details for the disk cache to increase performance reading from a KX database mounted to the cluster. 
See [cache_configurations](#cache_configurations). * `changeset_id` - (Optional) A unique identifier of the changeset that is associated with the cluster. +* `dataview_name` - (Optional) The name of the dataview to be used for caching historical data on disk. You cannot update to a different dataview name once a cluster is created. Use `lifecycle` [`ignore_changes`](https://www.terraform.io/docs/configuration/meta-arguments/lifecycle.html#ignore_changes) for database to prevent any undesirable behaviors. #### cache_configurations @@ -161,9 +164,10 @@ The cache_configuration block supports the following arguments: The savedown_storage_configuration block supports the following arguments: -* `type` - (Required) Type of writeable storage space for temporarily storing your savedown data. The valid values are: +* `type` - (Optional) Type of writeable storage space for temporarily storing your savedown data. The valid values are: * SDS01 - This type represents 3000 IOPS and io2 ebs volume type. -* `size` - (Required) Size of temporary storage in gigabytes. Must be between 10 and 16000. +* `size` - (Optional) Size of temporary storage in gigabytes. Must be between 10 and 16000. +* `volume_name` - (Optional) The name of the kdb volume that you want to use as writeable save-down storage for clusters. ### vpc_configuration @@ -174,6 +178,20 @@ The vpc_configuration block supports the following arguments: * `subnet_ids `- (Required) Identifier of the subnet that the Privatelink VPC endpoint uses to connect to the cluster. * `ip_address_type` - (Required) IP address type for cluster network configuration parameters. The following type is available: IP_V4 - IP address version 4. +### scaling_group_configuration + +* `scaling_group_name` - (Required) A unique identifier for the kdb scaling group. +* `memory_reservation` - (Required) A reservation of the minimum amount of memory that should be available on the scaling group for a kdb cluster to be successfully placed in a scaling group. +* `node_count` - (Required) The number of kdb cluster nodes. +* `cpu` - The number of vCPUs that you want to reserve for each node of this kdb cluster on the scaling group host. +* `memory_limit` - An optional hard limit on the amount of memory a kdb cluster can use. + +### tickerplant_log_configuration + +The tickerplant_log_configuration block supports the following arguments: + +* tickerplant_log_volumes - (Required) The names of the volumes for tickerplant logs. + ## Attribute Reference This resource exports the following attributes in addition to the arguments above: diff --git a/website/docs/r/finspace_kx_scaling_group.html.markdown b/website/docs/r/finspace_kx_scaling_group.html.markdown new file mode 100644 index 00000000000..fa5717c5862 --- /dev/null +++ b/website/docs/r/finspace_kx_scaling_group.html.markdown @@ -0,0 +1,81 @@ +--- +subcategory: "FinSpace" +layout: "aws" +page_title: "AWS: aws_finspace_kx_scaling_group" +description: |- + Terraform resource for managing an AWS FinSpace Kx Scaling Group. +--- + +# Resource: aws_finspace_kx_scaling_group + +Terraform resource for managing an AWS FinSpace Kx Scaling Group. 
+ +## Example Usage + +### Basic Usage + +```terraform +resource "aws_finspace_kx_scaling_group" "test" { + name = "my-tf-kx-scalinggroup" + environment_id = aws_finspace_kx_environment.example.id + availability_zone_id = "use1-az2" host_type = "kx.sg.4xlarge" +} +``` + +## Argument Reference + +The following arguments are required: + +* `availability_zone_id` - (Required) The availability zone identifiers for the requested regions. +* `environment_id` - (Required) A unique identifier for the kdb environment, where you want to create the scaling group. +* `name` - (Required) Unique name for the scaling group that you want to create. +* `host_type` - (Required) The memory and CPU capabilities of the scaling group host on which FinSpace Managed kdb clusters will be placed. + +The following arguments are optional: + +* `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. You can add up to 50 tags to a scaling group. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `arn` - Amazon Resource Name (ARN) identifier of the KX Scaling Group. +* `clusters` - The list of Managed kdb clusters that are currently active in the given scaling group. +* `created_timestamp` - The timestamp at which the scaling group was created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000. +* `last_modified_timestamp` - Last timestamp at which the scaling group was updated in FinSpace. Value determined as epoch time in seconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000. +*`status` - The status of scaling group. + * CREATING – The scaling group creation is in progress. + * CREATE_FAILED – The scaling group creation has failed. + * ACTIVE – The scaling group is active. + * UPDATING – The scaling group is in the process of being updated. + * UPDATE_FAILED – The update action failed. + * DELETING – The scaling group is in the process of being deleted. + * DELETE_FAILED – The system failed to delete the scaling group. + * DELETED – The scaling group is successfully deleted. +* `status_reason` - The error message when a failed state occurs. +* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block). + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `45m`) +* `update` - (Default `30m`) +* `delete` - (Default `60m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import an AWS FinSpace Kx scaling group using the `id` (environment ID and scaling group name, comma-delimited). For example: + +```terraform +import { + to = aws_finspace_kx_scaling_group.example + id = "n3ceo7wqxoxcti5tujqwzs,my-tf-kx-scalinggroup" +} +``` + +Using `terraform import`, import an AWS FinSpace Kx Scaling Group using the `id` (environment ID and scaling group name, comma-delimited). 
For example: + +```console +% terraform import aws_finspace_kx_scaling_group.example n3ceo7wqxoxcti5tujqwzs,my-tf-kx-scalinggroup +``` From dd1d92cc4239aa6081c5c00ec44a581daccc514d Mon Sep 17 00:00:00 2001 From: Mayank Hirani Date: Fri, 8 Dec 2023 13:42:04 -0800 Subject: [PATCH 010/123] remove kx volume from service package. --- internal/service/finspace/service_package_gen.go | 8 -------- 1 file changed, 8 deletions(-) diff --git a/internal/service/finspace/service_package_gen.go b/internal/service/finspace/service_package_gen.go index 029ecf2b731..46468a889b9 100644 --- a/internal/service/finspace/service_package_gen.go +++ b/internal/service/finspace/service_package_gen.go @@ -68,14 +68,6 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka IdentifierAttribute: "arn", }, }, - { - Factory: ResourceKxVolume, - TypeName: "aws_finspace_kx_volume", - Name: "Kx Volume", - Tags: &types.ServicePackageResourceTags{ - IdentifierAttribute: "arn", - }, - }, } } From 33301ff8451efe887d20fcf93a81edca530d4d8d Mon Sep 17 00:00:00 2001 From: David Hwang Date: Fri, 8 Dec 2023 16:45:55 -0500 Subject: [PATCH 011/123] Remove skips from cluster test --- internal/service/finspace/kx_cluster_test.go | 30 +++---- .../r/finspace_kx_scaling_group.html.markdown | 81 ------------------- 2 files changed, 15 insertions(+), 96 deletions(-) delete mode 100644 website/docs/r/finspace_kx_scaling_group.html.markdown diff --git a/internal/service/finspace/kx_cluster_test.go b/internal/service/finspace/kx_cluster_test.go index 3c5c2738a75..16c5b53d1d6 100644 --- a/internal/service/finspace/kx_cluster_test.go +++ b/internal/service/finspace/kx_cluster_test.go @@ -101,7 +101,7 @@ func TestAccSKIPFinSpaceKxCluster_disappears(t *testing.T) { }) } -func TestAccSKIPFinSpaceKxCluster_description(t *testing.T) { +func TestAccFinSpaceKxCluster_description(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -132,7 +132,7 @@ func TestAccSKIPFinSpaceKxCluster_description(t *testing.T) { }) } -func TestAccSKIPFinSpaceKxCluster_database(t *testing.T) { +func TestAccFinSpaceKxCluster_database(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -164,7 +164,7 @@ func TestAccSKIPFinSpaceKxCluster_database(t *testing.T) { }) } -func TestAccSKIPFinSpaceKxCluster_cacheConfigurations(t *testing.T) { +func TestAccFinSpaceKxCluster_cacheConfigurations(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -195,7 +195,7 @@ func TestAccSKIPFinSpaceKxCluster_cacheConfigurations(t *testing.T) { }) } -func TestAccSKIPFinSpaceKxCluster_cache250Configurations(t *testing.T) { +func TestAccFinSpaceKxCluster_cache250Configurations(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -233,7 +233,7 @@ func TestAccSKIPFinSpaceKxCluster_cache250Configurations(t *testing.T) { }) } -func TestAccSKIPFinSpaceKxCluster_cache12Configurations(t *testing.T) { +func TestAccFinSpaceKxCluster_cache12Configurations(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -271,7 +271,7 @@ func TestAccSKIPFinSpaceKxCluster_cache12Configurations(t *testing.T) { }) } -func TestAccSKIPFinSpaceKxCluster_code(t *testing.T) { +func TestAccFinSpaceKxCluster_code(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -319,7 +319,7 @@ func TestAccSKIPFinSpaceKxCluster_code(t *testing.T) { }) } -func 
TestAccSKIPFinSpaceKxCluster_multiAZ(t *testing.T) { +func TestAccFinSpaceKxCluster_multiAZ(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -350,7 +350,7 @@ func TestAccSKIPFinSpaceKxCluster_multiAZ(t *testing.T) { }) } -func TestAccSKIPFinSpaceKxCluster_rdb(t *testing.T) { +func TestAccFinSpaceKxCluster_rdb(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -381,7 +381,7 @@ func TestAccSKIPFinSpaceKxCluster_rdb(t *testing.T) { }) } -func TestAccSKIPFinSpaceKxCluster_executionRole(t *testing.T) { +func TestAccFinSpaceKxCluster_executionRole(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -412,7 +412,7 @@ func TestAccSKIPFinSpaceKxCluster_executionRole(t *testing.T) { }) } -func TestAccSKIPFinSpaceKxCluster_autoScaling(t *testing.T) { +func TestAccFinSpaceKxCluster_autoScaling(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -443,7 +443,7 @@ func TestAccSKIPFinSpaceKxCluster_autoScaling(t *testing.T) { }) } -func TestAccSKIPFinSpaceKxCluster_initializationScript(t *testing.T) { +func TestAccFinSpaceKxCluster_initializationScript(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -484,7 +484,7 @@ func TestAccSKIPFinSpaceKxCluster_initializationScript(t *testing.T) { }) } -func TestAccSKIPFinSpaceKxCluster_commandLineArgs(t *testing.T) { +func TestAccFinSpaceKxCluster_commandLineArgs(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -527,7 +527,7 @@ func TestAccSKIPFinSpaceKxCluster_commandLineArgs(t *testing.T) { }) } -func TestAccSKIPFinSpaceKxCluster_tags(t *testing.T) { +func TestAccFinSpaceKxCluster_tags(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -576,7 +576,7 @@ func TestAccSKIPFinSpaceKxCluster_tags(t *testing.T) { }) } -func TestAccSKIPFinSpaceKxCluster_ScalingGroup(t *testing.T) { +func TestAccFinSpaceKxCluster_ScalingGroup(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -608,7 +608,7 @@ func TestAccSKIPFinSpaceKxCluster_ScalingGroup(t *testing.T) { }) } -func TestAccSKIPFinSpaceKxRDBClusterInScalingGroup_withKxVolume(t *testing.T) { +func TestAccFinSpaceKxRDBClusterInScalingGroup_withKxVolume(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } diff --git a/website/docs/r/finspace_kx_scaling_group.html.markdown b/website/docs/r/finspace_kx_scaling_group.html.markdown deleted file mode 100644 index fa5717c5862..00000000000 --- a/website/docs/r/finspace_kx_scaling_group.html.markdown +++ /dev/null @@ -1,81 +0,0 @@ ---- -subcategory: "FinSpace" -layout: "aws" -page_title: "AWS: aws_finspace_kx_scaling_group" -description: |- - Terraform resource for managing an AWS FinSpace Kx Scaling Group. ---- - -# Resource: aws_finspace_kx_scaling_group - -Terraform resource for managing an AWS FinSpace Kx Scaling Group. - -## Example Usage - -### Basic Usage - -```terraform -resource "aws_finspace_kx_scaling_group" "test" { - name = "my-tf-kx-scalinggroup" - environment_id = aws_finspace_kx_environment.example.id - availability_zone_id = "use1-az2" host_type = "kx.sg.4xlarge" -} -``` - -## Argument Reference - -The following arguments are required: - -* `availability_zone_id` - (Required) The availability zone identifiers for the requested regions. 
-* `environment_id` - (Required) A unique identifier for the kdb environment, where you want to create the scaling group. -* `name` - (Required) Unique name for the scaling group that you want to create. -* `host_type` - (Required) The memory and CPU capabilities of the scaling group host on which FinSpace Managed kdb clusters will be placed. - -The following arguments are optional: - -* `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. You can add up to 50 tags to a scaling group. - -## Attribute Reference - -This resource exports the following attributes in addition to the arguments above: - -* `arn` - Amazon Resource Name (ARN) identifier of the KX Scaling Group. -* `clusters` - The list of Managed kdb clusters that are currently active in the given scaling group. -* `created_timestamp` - The timestamp at which the scaling group was created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000. -* `last_modified_timestamp` - Last timestamp at which the scaling group was updated in FinSpace. Value determined as epoch time in seconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000. -*`status` - The status of scaling group. - * CREATING – The scaling group creation is in progress. - * CREATE_FAILED – The scaling group creation has failed. - * ACTIVE – The scaling group is active. - * UPDATING – The scaling group is in the process of being updated. - * UPDATE_FAILED – The update action failed. - * DELETING – The scaling group is in the process of being deleted. - * DELETE_FAILED – The system failed to delete the scaling group. - * DELETED – The scaling group is successfully deleted. -* `status_reason` - The error message when a failed state occurs. -* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block). - -## Timeouts - -[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): - -* `create` - (Default `45m`) -* `update` - (Default `30m`) -* `delete` - (Default `60m`) - -## Import - -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import an AWS FinSpace Kx scaling group using the `id` (environment ID and scaling group name, comma-delimited). For example: - -```terraform -import { - to = aws_finspace_kx_scaling_group.example - id = "n3ceo7wqxoxcti5tujqwzs,my-tf-kx-scalinggroup" -} -``` - -Using `terraform import`, import an AWS FinSpace Kx Scaling Group using the `id` (environment ID and scaling group name, comma-delimited). For example: - -```console -% terraform import aws_finspace_kx_scaling_group.example n3ceo7wqxoxcti5tujqwzs,my-tf-kx-scalinggroup -``` From 3e1494dc0eb37063f2f5630d0ce40fca2147f786 Mon Sep 17 00:00:00 2001 From: hsiam261 Date: Mon, 11 Dec 2023 03:10:17 +0600 Subject: [PATCH 012/123] Wait for import to become complete during dynamodb table creation if import fails i.e if import doesn't lead to completed status with timeout period or fails due to some error before that, we will throw an exception. 
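
For illustration, a minimal sketch of the case this waiter targets: a table created with the `import_table` block, where creation now only returns once the import reaches the COMPLETED status. The bucket name, key prefix, and timeout below are assumed placeholder values, not taken from this change.

```terraform
resource "aws_dynamodb_table" "example" {
  name         = "example-import"
  billing_mode = "PAY_PER_REQUEST"
  hash_key     = "id"

  attribute {
    name = "id"
    type = "S"
  }

  import_table {
    input_format = "DYNAMODB_JSON"

    s3_bucket_source {
      bucket     = "example-import-bucket" # placeholder bucket
      key_prefix = "exports/"              # placeholder prefix
    }
  }

  timeouts {
    # Large imports can run longer than the default; the waiter uses the
    # resource's create timeout, so raise it for big data sets.
    create = "4h"
  }
}
```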
--- internal/service/dynamodb/status.go | 15 +++++++++++++++ internal/service/dynamodb/table.go | 7 ++++++- internal/service/dynamodb/wait.go | 17 +++++++++++++++++ 3 files changed, 38 insertions(+), 1 deletion(-) diff --git a/internal/service/dynamodb/status.go b/internal/service/dynamodb/status.go index 4eb294b3641..b6c5d92c88d 100644 --- a/internal/service/dynamodb/status.go +++ b/internal/service/dynamodb/status.go @@ -49,6 +49,21 @@ func statusTable(ctx context.Context, conn *dynamodb.DynamoDB, tableName string) } } +func statusImport(ctx context.Context, conn *dynamodb.DynamoDB, importArn string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + describeImportInput := &dynamodb.DescribeImportInput{ + ImportArn: &importArn, + } + output, err := conn.DescribeImportWithContext(ctx, describeImportInput) + + if err != nil { + return nil, "", err + } + + return output, aws.StringValue(output.ImportTableDescription.ImportStatus), nil + } +} + func statusReplicaUpdate(ctx context.Context, conn *dynamodb.DynamoDB, tableName, region string) retry.StateRefreshFunc { return func() (interface{}, string, error) { result, err := conn.DescribeTableWithContext(ctx, &dynamodb.DescribeTableInput{ diff --git a/internal/service/dynamodb/table.go b/internal/service/dynamodb/table.go index 422c1b54202..c61acaf9369 100644 --- a/internal/service/dynamodb/table.go +++ b/internal/service/dynamodb/table.go @@ -602,7 +602,7 @@ func resourceTableCreate(ctx context.Context, d *schema.ResourceData, meta inter input.TableCreationParameters = tcp - _, err := tfresource.RetryWhen(ctx, createTableTimeout, func() (interface{}, error) { + importTableOutput, err := tfresource.RetryWhen(ctx, createTableTimeout, func() (interface{}, error) { return conn.ImportTableWithContext(ctx, input) }, func(err error) (bool, error) { if tfawserr.ErrCodeEquals(err, "ThrottlingException") { @@ -621,6 +621,11 @@ func resourceTableCreate(ctx context.Context, d *schema.ResourceData, meta inter if err != nil { return create.AppendDiagError(diags, names.DynamoDB, create.ErrActionCreating, ResNameTable, tableName, err) } + + importArn := importTableOutput.(*dynamodb.ImportTableOutput).ImportTableDescription.ImportArn + if _, err = waitImportComplete(ctx, conn, *importArn, d.Timeout(schema.TimeoutCreate)); err != nil { + return create.AppendDiagError(diags, names.DynamoDB, create.ErrActionCreating, ResNameTable, d.Id(), err) + } } else { input := &dynamodb.CreateTableInput{ BillingMode: aws.String(d.Get("billing_mode").(string)), diff --git a/internal/service/dynamodb/wait.go b/internal/service/dynamodb/wait.go index ab26a11dae6..b83d49dca5c 100644 --- a/internal/service/dynamodb/wait.go +++ b/internal/service/dynamodb/wait.go @@ -77,6 +77,23 @@ func waitTableActive(ctx context.Context, conn *dynamodb.DynamoDB, tableName str return nil, err } +func waitImportComplete(ctx context.Context, conn *dynamodb.DynamoDB, importArn string, timeout time.Duration) (*dynamodb.DescribeImportOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{dynamodb.ImportStatusInProgress}, + Target: []string{dynamodb.ImportStatusCompleted}, + Timeout: maxDuration(createTableTimeout, timeout), + Refresh: statusImport(ctx, conn, importArn), + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*dynamodb.DescribeImportOutput); ok { + return output, err + } + + return nil, err +} + func waitTableDeleted(ctx context.Context, conn *dynamodb.DynamoDB, tableName string, timeout time.Duration) 
(*dynamodb.TableDescription, error) { stateConf := &retry.StateChangeConf{ Pending: []string{dynamodb.TableStatusActive, dynamodb.TableStatusDeleting}, From 6bba6eefc3a8c2d024a1b37fb005f6c33fbbcbf0 Mon Sep 17 00:00:00 2001 From: hsiam261 Date: Wed, 13 Dec 2023 01:36:39 +0600 Subject: [PATCH 013/123] add better logging when dynamodb import fails --- internal/service/dynamodb/wait.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/internal/service/dynamodb/wait.go b/internal/service/dynamodb/wait.go index b83d49dca5c..5a1b44a571f 100644 --- a/internal/service/dynamodb/wait.go +++ b/internal/service/dynamodb/wait.go @@ -87,6 +87,10 @@ func waitImportComplete(ctx context.Context, conn *dynamodb.DynamoDB, importArn outputRaw, err := stateConf.WaitForStateContext(ctx) + if err != nil { + err = fmt.Errorf("ImportArn %q : %w", importArn, err) + } + if output, ok := outputRaw.(*dynamodb.DescribeImportOutput); ok { return output, err } From 41d42970ddb7a3f2a3e588af9ac663683885c048 Mon Sep 17 00:00:00 2001 From: hsiam261 Date: Wed, 13 Dec 2023 02:57:19 +0600 Subject: [PATCH 014/123] setId before throwing exception for dynamoImportFailure for tainting Otherwise the table would get created but state won't manage it which would then require manual intervention to fix. --- internal/service/dynamodb/table.go | 1 + 1 file changed, 1 insertion(+) diff --git a/internal/service/dynamodb/table.go b/internal/service/dynamodb/table.go index c61acaf9369..bdbe09fbffe 100644 --- a/internal/service/dynamodb/table.go +++ b/internal/service/dynamodb/table.go @@ -624,6 +624,7 @@ func resourceTableCreate(ctx context.Context, d *schema.ResourceData, meta inter importArn := importTableOutput.(*dynamodb.ImportTableOutput).ImportTableDescription.ImportArn if _, err = waitImportComplete(ctx, conn, *importArn, d.Timeout(schema.TimeoutCreate)); err != nil { + d.SetId(tableName) return create.AppendDiagError(diags, names.DynamoDB, create.ErrActionCreating, ResNameTable, d.Id(), err) } } else { From 184262f363e6c583c333e494e6fe3255262e8d64 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 13 Dec 2023 15:15:39 -0500 Subject: [PATCH 015/123] deadcode: 'diag.FromAttributeError'. --- internal/errs/diag.go | 7 ------- 1 file changed, 7 deletions(-) diff --git a/internal/errs/diag.go b/internal/errs/diag.go index 7faf6b596c5..4b3666632d3 100644 --- a/internal/errs/diag.go +++ b/internal/errs/diag.go @@ -68,13 +68,6 @@ func NewWarningDiagnostic(summary, detail string) diag.Diagnostic { } } -func FromAttributeError(path cty.Path, err error) diag.Diagnostic { - return withPath( - NewErrorDiagnostic(err.Error(), ""), - path, - ) -} - func withPath(d diag.Diagnostic, path cty.Path) diag.Diagnostic { d.AttributePath = path return d From 0c8849a1c8b8b1b2fd848d880301f89ed2041d59 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 13 Dec 2023 15:19:06 -0500 Subject: [PATCH 016/123] deadcode: 'flex.ExpandFrameworkListNestedBlock' and 'flex.FlattenFrameworkListNestedBlock'. 
--- internal/framework/flex/list.go | 52 --------------------------------- 1 file changed, 52 deletions(-) diff --git a/internal/framework/flex/list.go b/internal/framework/flex/list.go index 7199673e6eb..5555cf51686 100644 --- a/internal/framework/flex/list.go +++ b/internal/framework/flex/list.go @@ -10,9 +10,6 @@ import ( "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-framework/types/basetypes" - "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" - fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" - "github.com/hashicorp/terraform-provider-aws/internal/slices" ) func ExpandFrameworkStringList(ctx context.Context, v basetypes.ListValuable) []*string { @@ -86,52 +83,3 @@ func FlattenFrameworkStringValueListLegacy[T ~string](_ context.Context, vs []T) return types.ListValueMust(types.StringType, elems) } - -type FrameworkElementExpanderFunc[T any, U any] func(context.Context, T) U - -func ExpandFrameworkListNestedBlock[T any, U any](ctx context.Context, tfList types.List, f FrameworkElementExpanderFunc[T, U]) []U { - if tfList.IsNull() || tfList.IsUnknown() { - return nil - } - - var data []T - - _ = fwdiag.Must(0, tfList.ElementsAs(ctx, &data, false)) - - return slices.ApplyToAll(data, func(t T) U { - return f(ctx, t) - }) -} - -func ExpandFrameworkListNestedBlockPtr[T any, U any](ctx context.Context, tfList types.List, f FrameworkElementExpanderFunc[T, *U]) *U { - if tfList.IsNull() || tfList.IsUnknown() { - return nil - } - - var data []T - - _ = fwdiag.Must(0, tfList.ElementsAs(ctx, &data, false)) - - if len(data) == 0 { - return nil - } - - return f(ctx, data[0]) -} - -type FrameworkElementFlattenerFunc[T any, U any] func(context.Context, U) T - -func FlattenFrameworkListNestedBlock[T any, U any](ctx context.Context, apiObjects []U, f FrameworkElementFlattenerFunc[T, U]) types.List { - attributeTypes := fwtypes.AttributeTypesMust[T](ctx) - elementType := types.ObjectType{AttrTypes: attributeTypes} - - if len(apiObjects) == 0 { - return types.ListNull(elementType) - } - - data := slices.ApplyToAll(apiObjects, func(apiObject U) T { - return f(ctx, apiObject) - }) - - return fwdiag.Must(types.ListValueFrom(ctx, elementType, data)) -} From 40081026a52dcf50a1d67d814bca116429ec6ddc Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 13 Dec 2023 15:23:43 -0500 Subject: [PATCH 017/123] deadcode: service/dynamodb. 
--- internal/service/dynamodb/table_item.go | 30 ------------------------- 1 file changed, 30 deletions(-) diff --git a/internal/service/dynamodb/table_item.go b/internal/service/dynamodb/table_item.go index fae285cb8ee..344807ef2d9 100644 --- a/internal/service/dynamodb/table_item.go +++ b/internal/service/dynamodb/table_item.go @@ -8,7 +8,6 @@ import ( "fmt" "log" "reflect" - "regexp" "strings" "github.com/aws/aws-sdk-go/aws" @@ -270,35 +269,6 @@ func FindTableItem(ctx context.Context, conn *dynamodb.DynamoDB, tableName strin return out, nil } -func BuildExpressionAttributeNames(attrs map[string]*dynamodb.AttributeValue) map[string]*string { - names := map[string]*string{} - - for key := range attrs { - names["#a_"+cleanKeyName(key)] = aws.String(key) - } - - log.Printf("[DEBUG] ExpressionAttributeNames: %+v", names) - return names -} - -func cleanKeyName(key string) string { - reg, err := regexp.Compile("[A-Za-z^]+") // suspect regexp - if err != nil { - log.Printf("[ERROR] clean keyname errored %v", err) - } - return reg.ReplaceAllString(key, "") -} - -func BuildProjectionExpression(attrs map[string]*dynamodb.AttributeValue) *string { - keys := []string{} - - for key := range attrs { - keys = append(keys, cleanKeyName(key)) - } - log.Printf("[DEBUG] ProjectionExpressions: %+v", strings.Join(keys, ", #a_")) - return aws.String("#a_" + strings.Join(keys, ", #a_")) -} - func buildTableItemID(tableName string, hashKey string, rangeKey string, attrs map[string]*dynamodb.AttributeValue) string { id := []string{tableName, hashKey} From a0f8de1dd4788cb4a7fd75e27b91f7d0d55fd956 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 13 Dec 2023 15:27:31 -0500 Subject: [PATCH 018/123] deadcode: service/ec2. --- internal/service/ec2/status.go | 32 ------------------------------- internal/service/ec2/wait.go | 35 ---------------------------------- 2 files changed, 67 deletions(-) diff --git a/internal/service/ec2/status.go b/internal/service/ec2/status.go index be8ece11bb4..67f40990dc7 100644 --- a/internal/service/ec2/status.go +++ b/internal/service/ec2/status.go @@ -960,38 +960,6 @@ func StatusVolumeModificationState(ctx context.Context, conn *ec2.EC2, id string } } -func StatusVPCState(ctx context.Context, conn *ec2.EC2, id string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - output, err := FindVPCByID(ctx, conn, id) - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return output, aws.StringValue(output.State), nil - } -} - -func StatusVPCAttributeValue(ctx context.Context, conn *ec2.EC2, id string, attribute string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - attributeValue, err := FindVPCAttribute(ctx, conn, id, attribute) - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return attributeValue, strconv.FormatBool(attributeValue), nil - } -} - func StatusVPCCIDRBlockAssociationState(ctx context.Context, conn *ec2.EC2, id string) retry.StateRefreshFunc { return func() (interface{}, string, error) { output, _, err := FindVPCCIDRBlockAssociationByID(ctx, conn, id) diff --git a/internal/service/ec2/wait.go b/internal/service/ec2/wait.go index bea1573b898..8319a984981 100644 --- a/internal/service/ec2/wait.go +++ b/internal/service/ec2/wait.go @@ -1667,41 +1667,6 @@ const ( vpcDeletedTimeout = 5 * time.Minute ) -func WaitVPCCreated(ctx context.Context, conn *ec2.EC2, id string) (*ec2.Vpc, error) { - stateConf := 
&retry.StateChangeConf{ - Pending: []string{ec2.VpcStatePending}, - Target: []string{ec2.VpcStateAvailable}, - Refresh: StatusVPCState(ctx, conn, id), - Timeout: vpcCreatedTimeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*ec2.Vpc); ok { - return output, err - } - - return nil, err -} - -func WaitVPCAttributeUpdated(ctx context.Context, conn *ec2.EC2, vpcID string, attribute string, expectedValue bool) (*ec2.Vpc, error) { - stateConf := &retry.StateChangeConf{ - Target: []string{strconv.FormatBool(expectedValue)}, - Refresh: StatusVPCAttributeValue(ctx, conn, vpcID, attribute), - Timeout: ec2PropagationTimeout, - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*ec2.Vpc); ok { - return output, err - } - - return nil, err -} - func WaitVPCCIDRBlockAssociationCreated(ctx context.Context, conn *ec2.EC2, id string, timeout time.Duration) (*ec2.VpcCidrBlockState, error) { stateConf := &retry.StateChangeConf{ Pending: []string{ec2.VpcCidrBlockStateCodeAssociating, ec2.VpcCidrBlockStateCodeDisassociated, ec2.VpcCidrBlockStateCodeFailing}, From 9826ee1b7f70200555a62725152d999045b51c52 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 13 Dec 2023 15:29:45 -0500 Subject: [PATCH 019/123] deadcode: service/elasticache. --- .../service/elasticache/parameter_group.go | 14 ----------- .../elasticache/parameter_group_test.go | 25 ------------------- 2 files changed, 39 deletions(-) diff --git a/internal/service/elasticache/parameter_group.go b/internal/service/elasticache/parameter_group.go index a8423f2c68a..9fe188ece78 100644 --- a/internal/service/elasticache/parameter_group.go +++ b/internal/service/elasticache/parameter_group.go @@ -409,20 +409,6 @@ func FlattenParameters(list []*elasticache.Parameter) []map[string]interface{} { return result } -// Takes the result of flatmap.Expand for an array of parameters and -// returns Parameter API compatible objects -func ExpandParameters(configured []interface{}) []*elasticache.ParameterNameValue { - parameters := make([]*elasticache.ParameterNameValue, len(configured)) - - // Loop over our configured parameters and create - // an array of aws-sdk-go compatible objects - for i, pRaw := range configured { - parameters[i] = expandParameter(pRaw.(map[string]interface{})) - } - - return parameters -} - func expandParameter(param map[string]interface{}) *elasticache.ParameterNameValue { return &elasticache.ParameterNameValue{ ParameterName: aws.String(param["name"].(string)), diff --git a/internal/service/elasticache/parameter_group_test.go b/internal/service/elasticache/parameter_group_test.go index b4b0e93762c..4e6cf8069ee 100644 --- a/internal/service/elasticache/parameter_group_test.go +++ b/internal/service/elasticache/parameter_group_test.go @@ -620,31 +620,6 @@ func TestFlattenParameters(t *testing.T) { } } -func TestExpandParameters(t *testing.T) { - t.Parallel() - - expanded := []interface{}{ - map[string]interface{}{ - "name": "activerehashing", - "value": "yes", - "apply_method": "immediate", - }, - } - parameters := tfelasticache.ExpandParameters(expanded) - - expected := &elasticache.ParameterNameValue{ - ParameterName: aws.String("activerehashing"), - ParameterValue: aws.String("yes"), - } - - if !reflect.DeepEqual(parameters[0], expected) { - t.Fatalf( - "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - parameters[0], - expected) - } -} - func TestParameterChanges(t *testing.T) { t.Parallel() From 
b7d0d8ae975cccca7ca471b183ed446c894f0fb2 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 13 Dec 2023 15:32:56 -0500 Subject: [PATCH 020/123] deadcode: service/iam. --- internal/service/iam/arn.go | 43 ---------------- internal/service/iam/arn_test.go | 84 -------------------------------- 2 files changed, 127 deletions(-) delete mode 100644 internal/service/iam/arn.go delete mode 100644 internal/service/iam/arn_test.go diff --git a/internal/service/iam/arn.go b/internal/service/iam/arn.go deleted file mode 100644 index 1abf14baf02..00000000000 --- a/internal/service/iam/arn.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package iam - -import ( - "fmt" - "strings" - - "github.com/aws/aws-sdk-go/aws/arn" -) - -const ( - ARNSeparator = "/" - ARNService = "iam" - - InstanceProfileResourcePrefix = "instance-profile" -) - -// InstanceProfileARNToName converts Amazon Resource Name (ARN) to Name. -func InstanceProfileARNToName(inputARN string) (string, error) { - parsedARN, err := arn.Parse(inputARN) - - if err != nil { - return "", fmt.Errorf("parsing ARN (%s): %w", inputARN, err) - } - - if actual, expected := parsedARN.Service, ARNService; actual != expected { - return "", fmt.Errorf("expected service %s in ARN (%s), got: %s", expected, inputARN, actual) - } - - resourceParts := strings.Split(parsedARN.Resource, ARNSeparator) - - if actual, expected := len(resourceParts), 2; actual < expected { - return "", fmt.Errorf("expected at least %d resource parts in ARN (%s), got: %d", expected, inputARN, actual) - } - - if actual, expected := resourceParts[0], InstanceProfileResourcePrefix; actual != expected { - return "", fmt.Errorf("expected resource prefix %s in ARN (%s), got: %s", expected, inputARN, actual) - } - - return resourceParts[len(resourceParts)-1], nil -} diff --git a/internal/service/iam/arn_test.go b/internal/service/iam/arn_test.go deleted file mode 100644 index ba07c02b7c2..00000000000 --- a/internal/service/iam/arn_test.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package iam_test - -import ( - "regexp" - "testing" - - "github.com/YakDriver/regexache" - tfiam "github.com/hashicorp/terraform-provider-aws/internal/service/iam" -) - -func TestInstanceProfileARNToName(t *testing.T) { - t.Parallel() - - testCases := []struct { - TestName string - InputARN string - ExpectedError *regexp.Regexp - ExpectedName string - }{ - { - TestName: "empty ARN", - InputARN: "", - ExpectedError: regexache.MustCompile(`parsing ARN`), - }, - { - TestName: "unparsable ARN", - InputARN: "test", - ExpectedError: regexache.MustCompile(`parsing ARN`), - }, - { - TestName: "invalid ARN service", - InputARN: "arn:aws:ec2:us-east-1:123456789012:instance/i-12345678", //lintignore:AWSAT003,AWSAT005 - ExpectedError: regexache.MustCompile(`expected service iam`), - }, - { - TestName: "invalid ARN resource parts", - InputARN: "arn:aws:iam:us-east-1:123456789012:name", //lintignore:AWSAT003,AWSAT005 - ExpectedError: regexache.MustCompile(`expected at least 2 resource parts`), - }, - { - TestName: "invalid ARN resource prefix", - InputARN: "arn:aws:iam:us-east-1:123456789012:role/name", //lintignore:AWSAT003,AWSAT005 - ExpectedError: regexache.MustCompile(`expected resource prefix instance-profile`), - }, - { - TestName: "valid ARN", - InputARN: "arn:aws:iam:us-east-1:123456789012:instance-profile/name", //lintignore:AWSAT003,AWSAT005 - ExpectedName: "name", - }, - { - TestName: "valid ARN with multiple parts", - InputARN: "arn:aws:iam:us-east-1:123456789012:instance-profile/path/name", //lintignore:AWSAT003,AWSAT005 - ExpectedName: "name", - }, - } - - for _, testCase := range testCases { - testCase := testCase - t.Run(testCase.TestName, func(t *testing.T) { - t.Parallel() - - got, err := tfiam.InstanceProfileARNToName(testCase.InputARN) - - if err == nil && testCase.ExpectedError != nil { - t.Fatalf("expected error %s, got no error", testCase.ExpectedError.String()) - } - - if err != nil && testCase.ExpectedError == nil { - t.Fatalf("got unexpected error: %s", err) - } - - if err != nil && !testCase.ExpectedError.MatchString(err.Error()) { - t.Fatalf("expected error %s, got: %s", testCase.ExpectedError.String(), err) - } - - if got != testCase.ExpectedName { - t.Errorf("got %s, expected %s", got, testCase.ExpectedName) - } - }) - } -} From e14409e52d0791e503ee873728525886e89d5765 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 13 Dec 2023 15:33:12 -0500 Subject: [PATCH 021/123] deadcode: service/kinesis. --- internal/service/kinesis/flex.go | 19 --------------- internal/service/kinesis/flex_test.go | 34 --------------------------- 2 files changed, 53 deletions(-) delete mode 100644 internal/service/kinesis/flex.go delete mode 100644 internal/service/kinesis/flex_test.go diff --git a/internal/service/kinesis/flex.go b/internal/service/kinesis/flex.go deleted file mode 100644 index c6a83295f5b..00000000000 --- a/internal/service/kinesis/flex.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package kinesis - -import ( - "github.com/aws/aws-sdk-go/service/kinesis" -) - -func FlattenShardLevelMetrics(list []*kinesis.EnhancedMetrics) []string { - if len(list) == 0 { - return []string{} - } - strs := make([]string, 0, len(list[0].ShardLevelMetrics)) - for _, s := range list[0].ShardLevelMetrics { - strs = append(strs, *s) - } - return strs -} diff --git a/internal/service/kinesis/flex_test.go b/internal/service/kinesis/flex_test.go deleted file mode 100644 index f340759d2e1..00000000000 --- a/internal/service/kinesis/flex_test.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package kinesis - -import ( - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kinesis" -) - -func TestFlattenShardLevelMetrics(t *testing.T) { - t.Parallel() - - expanded := []*kinesis.EnhancedMetrics{ - { - ShardLevelMetrics: []*string{ - aws.String("IncomingBytes"), - aws.String("IncomingRecords"), - }, - }, - } - result := FlattenShardLevelMetrics(expanded) - if len(result) != 2 { - t.Fatalf("expected result had %d elements, but got %d", 2, len(result)) - } - if result[0] != "IncomingBytes" { - t.Fatalf("expected element 0 to be IncomingBytes, but was %s", result[0]) - } - if result[1] != "IncomingRecords" { - t.Fatalf("expected element 0 to be IncomingRecords, but was %s", result[1]) - } -} From e94567c5d5804b572b876902ae708d5443e2d559 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 13 Dec 2023 15:34:31 -0500 Subject: [PATCH 022/123] 'ListApplicationsPages' -> 'listApplicationsPages'. --- internal/service/kinesisanalytics/list.go | 2 +- internal/service/kinesisanalytics/sweep.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/service/kinesisanalytics/list.go b/internal/service/kinesisanalytics/list.go index 43d8984be93..4708493836f 100644 --- a/internal/service/kinesisanalytics/list.go +++ b/internal/service/kinesisanalytics/list.go @@ -12,7 +12,7 @@ import ( // Custom Kinesisanalytics listing functions using similar formatting as other service generated code. -func ListApplicationsPages(ctx context.Context, conn *kinesisanalytics.KinesisAnalytics, input *kinesisanalytics.ListApplicationsInput, fn func(*kinesisanalytics.ListApplicationsOutput, bool) bool) error { +func listApplicationsPages(ctx context.Context, conn *kinesisanalytics.KinesisAnalytics, input *kinesisanalytics.ListApplicationsInput, fn func(*kinesisanalytics.ListApplicationsOutput, bool) bool) error { for { output, err := conn.ListApplicationsWithContext(ctx, input) if err != nil { diff --git a/internal/service/kinesisanalytics/sweep.go b/internal/service/kinesisanalytics/sweep.go index 6b63e1b08a8..66359506afb 100644 --- a/internal/service/kinesisanalytics/sweep.go +++ b/internal/service/kinesisanalytics/sweep.go @@ -36,7 +36,7 @@ func sweepApplications(region string) error { var sweeperErrs *multierror.Error input := &kinesisanalytics.ListApplicationsInput{} - err = ListApplicationsPages(ctx, conn, input, func(page *kinesisanalytics.ListApplicationsOutput, lastPage bool) bool { + err = listApplicationsPages(ctx, conn, input, func(page *kinesisanalytics.ListApplicationsOutput, lastPage bool) bool { if page == nil { return !lastPage } From 4130694f85ea67dda7e33d1eafc04c51dfa1f960 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 13 Dec 2023 15:35:49 -0500 Subject: [PATCH 023/123] deadcode: service/lakeformation. 
--- internal/service/lakeformation/strings.go | 11 ---- .../service/lakeformation/strings_test.go | 60 ------------------- 2 files changed, 71 deletions(-) diff --git a/internal/service/lakeformation/strings.go b/internal/service/lakeformation/strings.go index ece0ae79a5b..2dc826bdf37 100644 --- a/internal/service/lakeformation/strings.go +++ b/internal/service/lakeformation/strings.go @@ -23,14 +23,3 @@ func StringSlicesEqualIgnoreOrder(s1, s2 []*string) bool { return reflect.DeepEqual(v1, v2) } - -func StringSlicesEqual(s1, s2 []*string) bool { - if len(s1) != len(s2) { - return false - } - - v1 := aws.StringValueSlice(s1) - v2 := aws.StringValueSlice(s2) - - return reflect.DeepEqual(v1, v2) -} diff --git a/internal/service/lakeformation/strings_test.go b/internal/service/lakeformation/strings_test.go index a864d14f2f0..f9f890a4739 100644 --- a/internal/service/lakeformation/strings_test.go +++ b/internal/service/lakeformation/strings_test.go @@ -65,63 +65,3 @@ func TestStringSlicesEqualIgnoreOrder(t *testing.T) { } } } - -func TestStringSlicesEqual(t *testing.T) { - t.Parallel() - - equal := []interface{}{ - []interface{}{ - []string{"a", "b", "c"}, - []string{"a", "b", "c"}, - }, - []interface{}{ - []string{"b", "a", "c"}, - []string{"b", "a", "c"}, - }, - []interface{}{ - []string{"apple", "carrot", "tomato"}, - []string{"apple", "carrot", "tomato"}, - }, - []interface{}{ - []string{"Application", "Barrier", "Chilly", "Donut"}, - []string{"Application", "Barrier", "Chilly", "Donut"}, - }, - []interface{}{ - []string{}, - []string{}, - }, - } - for _, v := range equal { - if !tflakeformation.StringSlicesEqual(aws.StringSlice(v.([]interface{})[0].([]string)), aws.StringSlice(v.([]interface{})[1].([]string))) { - t.Fatalf("%v should be equal: %v", v.([]interface{})[0].([]string), v.([]interface{})[1].([]string)) - } - } - - notEqual := []interface{}{ - []interface{}{ - []string{"a", "b", "c"}, - []string{"a", "b"}, - }, - []interface{}{ - []string{"a", "b", "c"}, - []string{"b", "a", "c"}, - }, - []interface{}{ - []string{"apple", "carrot", "tomato"}, - []string{"apple", "carrot", "tomato", "zucchini"}, - }, - []interface{}{ - []string{"Application", "Barrier", "Chilly", "Donut"}, - []string{"Application", "Barrier", "Chilly", "Donuts"}, - }, - []interface{}{ - []string{}, - []string{"Application", "Barrier", "Chilly", "Donuts"}, - }, - } - for _, v := range notEqual { - if tflakeformation.StringSlicesEqual(aws.StringSlice(v.([]interface{})[0].([]string)), aws.StringSlice(v.([]interface{})[1].([]string))) { - t.Fatalf("%v should not be equal: %v", v.([]interface{})[0].([]string), v.([]interface{})[1].([]string)) - } - } -} From 41f0158da6f74374636bc059e32001728f0dd13b Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 13 Dec 2023 15:36:19 -0500 Subject: [PATCH 024/123] deadcode: service/lambda. 
--- internal/service/lambda/permission.go | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/internal/service/lambda/permission.go b/internal/service/lambda/permission.go index d3383355adc..410a48b1728 100644 --- a/internal/service/lambda/permission.go +++ b/internal/service/lambda/permission.go @@ -340,21 +340,6 @@ func FindPolicyStatementByTwoPartKey(ctx context.Context, conn *lambda.Lambda, f } } -func FindPolicyStatementByID(policy *Policy, id string) (*PolicyStatement, error) { - log.Printf("[DEBUG] Received %d statements in Lambda policy: %s", len(policy.Statement), policy.Statement) - for _, statement := range policy.Statement { - if statement.Sid == id { - return &statement, nil - } - } - - return nil, &retry.NotFoundError{ - LastRequest: id, - LastResponse: policy, - Message: fmt.Sprintf("Failed to find statement %q in Lambda policy:\n%s", id, policy.Statement), - } -} - func GetQualifierFromAliasOrVersionARN(arn string) (string, error) { matches := regexache.MustCompile(functionRegexp).FindStringSubmatch(arn) if len(matches) < 8 || matches[7] == "" { From 26525e579dd6749897aead68ee7b4f2c1b6b99c0 Mon Sep 17 00:00:00 2001 From: Mayank Hirani Date: Wed, 13 Dec 2023 18:05:06 -0500 Subject: [PATCH 025/123] Fix acceptance test linting and doc issue. --- .../service/finspace/kx_scaling_group_test.go | 198 +++++++++--------- .../r/finspace_kx_scaling_group.html.markdown | 5 +- 2 files changed, 102 insertions(+), 101 deletions(-) diff --git a/internal/service/finspace/kx_scaling_group_test.go b/internal/service/finspace/kx_scaling_group_test.go index 3f5b3714a94..28c988f4e2a 100644 --- a/internal/service/finspace/kx_scaling_group_test.go +++ b/internal/service/finspace/kx_scaling_group_test.go @@ -147,127 +147,127 @@ func testAccCheckKxScalingGroupExists(ctx context.Context, name string, KxScalin func testAccKxScalingGroupConfigBase(rName string) string { return fmt.Sprintf(` - data "aws_caller_identity" "current" {} - data "aws_partition" "current" {} +data "aws_caller_identity" "current" {} +data "aws_partition" "current" {} - output "account_id" { - value = data.aws_caller_identity.current.account_id - } +output "account_id" { + value = data.aws_caller_identity.current.account_id +} - resource "aws_kms_key" "test" { - deletion_window_in_days = 7 - } +resource "aws_kms_key" "test" { + deletion_window_in_days = 7 +} - resource "aws_finspace_kx_environment" "test" { - name = %[1]q - kms_key_id = aws_kms_key.test.arn - } +resource "aws_finspace_kx_environment" "test" { + name = %[1]q + kms_key_id = aws_kms_key.test.arn +} - data "aws_iam_policy_document" "key_policy" { - statement { - actions = [ - "kms:Decrypt", - "kms:GenerateDataKey" - ] +data "aws_iam_policy_document" "key_policy" { + statement { + actions = [ + "kms:Decrypt", + "kms:GenerateDataKey" + ] - resources = [ - aws_kms_key.test.arn, - ] + resources = [ + aws_kms_key.test.arn, + ] - principals { - type = "Service" - identifiers = ["finspace.amazonaws.com"] - } - - condition { - test = "ArnLike" - variable = "aws:SourceArn" - values = ["${aws_finspace_kx_environment.test.arn}/*"] - } + principals { + type = "Service" + identifiers = ["finspace.amazonaws.com"] + } - condition { - test = "StringEquals" - variable = "aws:SourceAccount" - values = [data.aws_caller_identity.current.account_id] - } - } + condition { + test = "ArnLike" + variable = "aws:SourceArn" + values = ["${aws_finspace_kx_environment.test.arn}/*"] + } - statement { - actions = [ - "kms:*", - ] + condition { + test = "StringEquals" + variable = 
"aws:SourceAccount" + values = [data.aws_caller_identity.current.account_id] + } + } + + statement { + actions = [ + "kms:*", + ] - resources = [ - "*", - ] + resources = [ + "*", + ] - principals { - type = "AWS" - identifiers = ["arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:root"] - } - } + principals { + type = "AWS" + identifiers = ["arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:root"] } + } +} - resource "aws_kms_key_policy" "test" { - key_id = aws_kms_key.test.id - policy = data.aws_iam_policy_document.key_policy.json - } +resource "aws_kms_key_policy" "test" { + key_id = aws_kms_key.test.id + policy = data.aws_iam_policy_document.key_policy.json +} - resource "aws_vpc" "test" { - cidr_block = "172.31.0.0/16" - enable_dns_hostnames = true - } +resource "aws_vpc" "test" { + cidr_block = "172.31.0.0/16" + enable_dns_hostnames = true +} - resource "aws_subnet" "test" { - vpc_id = aws_vpc.test.id - cidr_block = "172.31.32.0/20" - availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] - } +resource "aws_subnet" "test" { + vpc_id = aws_vpc.test.id + cidr_block = "172.31.32.0/20" + availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] +} - resource "aws_security_group" "test" { - name = %[1]q - vpc_id = aws_vpc.test.id - - ingress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } - - egress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } - } +resource "aws_security_group" "test" { + name = %[1]q + vpc_id = aws_vpc.test.id + + ingress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } +} - resource "aws_internet_gateway" "test" { - vpc_id = aws_vpc.test.id - } +resource "aws_internet_gateway" "test" { + vpc_id = aws_vpc.test.id +} - data "aws_route_tables" "rts" { - vpc_id = aws_vpc.test.id - } +data "aws_route_tables" "rts" { + vpc_id = aws_vpc.test.id +} - resource "aws_route" "r" { - route_table_id = tolist(data.aws_route_tables.rts.ids)[0] - destination_cidr_block = "0.0.0.0/0" - gateway_id = aws_internet_gateway.test.id - } - `, rName) +resource "aws_route" "r" { + route_table_id = tolist(data.aws_route_tables.rts.ids)[0] + destination_cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.test.id +} +`, rName) } func testAccKxScalingGroupConfig_basic(rName string) string { return acctest.ConfigCompose( testAccKxScalingGroupConfigBase(rName), fmt.Sprintf(` - resource "aws_finspace_kx_scaling_group" "test" { - name = %[1]q - environment_id = aws_finspace_kx_environment.test.id - availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] - host_type = "kx.sg.4xlarge" - } - `, rName)) +resource "aws_finspace_kx_scaling_group" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] + host_type = "kx.sg.4xlarge" +} +`, rName)) } diff --git a/website/docs/r/finspace_kx_scaling_group.html.markdown b/website/docs/r/finspace_kx_scaling_group.html.markdown index fa5717c5862..07eb9b2ced0 100644 --- a/website/docs/r/finspace_kx_scaling_group.html.markdown +++ b/website/docs/r/finspace_kx_scaling_group.html.markdown @@ -18,7 +18,8 @@ Terraform resource for managing an AWS FinSpace Kx Scaling Group. 
resource "aws_finspace_kx_scaling_group" "test" { name = "my-tf-kx-scalinggroup" environment_id = aws_finspace_kx_environment.example.id - availability_zone_id = "use1-az2" host_type = "kx.sg.4xlarge" + availability_zone_id = "use1-az2" + host_type = "kx.sg.4xlarge" } ``` @@ -43,7 +44,7 @@ This resource exports the following attributes in addition to the arguments abov * `clusters` - The list of Managed kdb clusters that are currently active in the given scaling group. * `created_timestamp` - The timestamp at which the scaling group was created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000. * `last_modified_timestamp` - Last timestamp at which the scaling group was updated in FinSpace. Value determined as epoch time in seconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000. -*`status` - The status of scaling group. +* `status` - The status of scaling group. * CREATING – The scaling group creation is in progress. * CREATE_FAILED – The scaling group creation has failed. * ACTIVE – The scaling group is active. From 76afd366616159260a7326f126d95e6b3bc5e7b1 Mon Sep 17 00:00:00 2001 From: Mayank Hirani Date: Wed, 13 Dec 2023 18:37:14 -0500 Subject: [PATCH 026/123] Fix acceptance test linting and doc issue. --- internal/service/finspace/kx_volume_test.go | 208 +++++++++--------- .../docs/r/finspace_kx_volume.html.markdown | 1 - 2 files changed, 104 insertions(+), 105 deletions(-) diff --git a/internal/service/finspace/kx_volume_test.go b/internal/service/finspace/kx_volume_test.go index 520c918c1d0..52008f1e25e 100644 --- a/internal/service/finspace/kx_volume_test.go +++ b/internal/service/finspace/kx_volume_test.go @@ -122,132 +122,132 @@ func testAccKxVolumeConfig_basic(rName string) string { return acctest.ConfigCompose( testAccKxVolumeConfigBase(rName), fmt.Sprintf(` - resource "aws_finspace_kx_volume" "test" { - name = %[1]q - environment_id = aws_finspace_kx_environment.test.id - availability_zones = [aws_finspace_kx_environment.test.availability_zones[0]] - az_mode = "SINGLE" - type = "NAS_1" - nas1_configuration { - type= "SSD_250" - size= 1200 - } - } - `, rName)) +resource "aws_finspace_kx_volume" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + availability_zones = [aws_finspace_kx_environment.test.availability_zones[0]] + az_mode = "SINGLE" + type = "NAS_1" + nas1_configuration { + type= "SSD_250" + size= 1200 + } +} +`, rName)) } func testAccKxVolumeConfigBase(rName string) string { return fmt.Sprintf(` - data "aws_caller_identity" "current" {} - data "aws_partition" "current" {} +data "aws_caller_identity" "current" {} +data "aws_partition" "current" {} - output "account_id" { - value = data.aws_caller_identity.current.account_id - } +output "account_id" { + value = data.aws_caller_identity.current.account_id +} - resource "aws_kms_key" "test" { - deletion_window_in_days = 7 - } +resource "aws_kms_key" "test" { + deletion_window_in_days = 7 +} - resource "aws_finspace_kx_environment" "test" { - name = %[1]q - kms_key_id = aws_kms_key.test.arn - } +resource "aws_finspace_kx_environment" "test" { + name = %[1]q + kms_key_id = aws_kms_key.test.arn +} - data "aws_iam_policy_document" "key_policy" { - statement { - actions = [ - "kms:Decrypt", - "kms:GenerateDataKey" - ] +data "aws_iam_policy_document" "key_policy" { + statement { + actions = [ + "kms:Decrypt", + "kms:GenerateDataKey" + ] - 
resources = [ - aws_kms_key.test.arn, - ] + resources = [ + aws_kms_key.test.arn, + ] - principals { - type = "Service" - identifiers = ["finspace.amazonaws.com"] - } - - condition { - test = "ArnLike" - variable = "aws:SourceArn" - values = ["${aws_finspace_kx_environment.test.arn}/*"] - } - - condition { - test = "StringEquals" - variable = "aws:SourceAccount" - values = [data.aws_caller_identity.current.account_id] - } - } - - statement { - actions = [ - "kms:*", - ] - - resources = [ - "*", - ] - - principals { - type = "AWS" - identifiers = ["arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:root"] - } - } + principals { + type = "Service" + identifiers = ["finspace.amazonaws.com"] } - resource "aws_kms_key_policy" "test" { - key_id = aws_kms_key.test.id - policy = data.aws_iam_policy_document.key_policy.json + condition { + test = "ArnLike" + variable = "aws:SourceArn" + values = ["${aws_finspace_kx_environment.test.arn}/*"] } - - resource "aws_vpc" "test" { - cidr_block = "172.31.0.0/16" - enable_dns_hostnames = true + + condition { + test = "StringEquals" + variable = "aws:SourceAccount" + values = [data.aws_caller_identity.current.account_id] } + } - resource "aws_subnet" "test" { - vpc_id = aws_vpc.test.id - cidr_block = "172.31.32.0/20" - availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] + statement { + actions = [ + "kms:*", + ] + + resources = [ + "*", + ] + + principals { + type = "AWS" + identifiers = ["arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:root"] } + } +} + +resource "aws_kms_key_policy" "test" { + key_id = aws_kms_key.test.id + policy = data.aws_iam_policy_document.key_policy.json +} - resource "aws_security_group" "test" { - name = %[1]q - vpc_id = aws_vpc.test.id +resource "aws_vpc" "test" { + cidr_block = "172.31.0.0/16" + enable_dns_hostnames = true +} - ingress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } +resource "aws_subnet" "test" { + vpc_id = aws_vpc.test.id + cidr_block = "172.31.32.0/20" + availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] +} - egress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } - } +resource "aws_security_group" "test" { + name = %[1]q + vpc_id = aws_vpc.test.id + + ingress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } +} - resource "aws_internet_gateway" "test" { - vpc_id = aws_vpc.test.id - } +resource "aws_internet_gateway" "test" { + vpc_id = aws_vpc.test.id +} - data "aws_route_tables" "rts" { - vpc_id = aws_vpc.test.id - } +data "aws_route_tables" "rts" { + vpc_id = aws_vpc.test.id +} - resource "aws_route" "r" { - route_table_id = tolist(data.aws_route_tables.rts.ids)[0] - destination_cidr_block = "0.0.0.0/0" - gateway_id = aws_internet_gateway.test.id - } - `, rName) +resource "aws_route" "r" { + route_table_id = tolist(data.aws_route_tables.rts.ids)[0] + destination_cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.test.id +} +`, rName) } func testAccCheckKxVolumeExists(ctx context.Context, name string, KxVolume *finspace.GetKxVolumeOutput) resource.TestCheckFunc { diff --git a/website/docs/r/finspace_kx_volume.html.markdown b/website/docs/r/finspace_kx_volume.html.markdown index 71e855f1bc4..b573a81efdd 100644 --- 
a/website/docs/r/finspace_kx_volume.html.markdown +++ b/website/docs/r/finspace_kx_volume.html.markdown @@ -45,7 +45,6 @@ The following arguments are optional: * `description` - (Optional) Description of the volume. * `tags` - (Optional) A list of key-value pairs to label the volume. You can add up to 50 tags to a volume - ### nas1_configuration The nas1_configuration block supports the following arguments: From c12d9a5df40dc662f887fd3e6277d35206d7a026 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Wed, 13 Dec 2023 20:08:24 -0500 Subject: [PATCH 027/123] r/aws_finspace_kx_scaling_group(test): fix test name typo --- internal/service/finspace/kx_scaling_group_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/service/finspace/kx_scaling_group_test.go b/internal/service/finspace/kx_scaling_group_test.go index 28c988f4e2a..9730ce94184 100644 --- a/internal/service/finspace/kx_scaling_group_test.go +++ b/internal/service/finspace/kx_scaling_group_test.go @@ -58,7 +58,7 @@ func TestAccFinSpaceKxScalingGroup_basic(t *testing.T) { }) } -func TestAccFinSpaceKxScalingGroup_dissappears(t *testing.T) { +func TestAccFinSpaceKxScalingGroup_disappears(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } From 4fbaa349226a2b1d029d3c6d680f93ca4ff27558 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Wed, 13 Dec 2023 20:09:18 -0500 Subject: [PATCH 028/123] r/aws_finspace_kx_scaling_group: reorg waiters, export finder --- internal/service/finspace/kx_scaling_group.go | 70 +++++++++---------- 1 file changed, 35 insertions(+), 35 deletions(-) diff --git a/internal/service/finspace/kx_scaling_group.go b/internal/service/finspace/kx_scaling_group.go index db5b8e9713c..c6248b4409e 100644 --- a/internal/service/finspace/kx_scaling_group.go +++ b/internal/service/finspace/kx_scaling_group.go @@ -148,44 +148,11 @@ func resourceKxScalingGroupCreate(ctx context.Context, d *schema.ResourceData, m return append(diags, resourceKxScalingGroupRead(ctx, d, meta)...) 
} -func waitKxScalingGroupCreated(ctx context.Context, conn *finspace.Client, id string, timeout time.Duration) (*finspace.GetKxScalingGroupOutput, error) { - stateConf := &retry.StateChangeConf{ - Pending: enum.Slice(types.KxScalingGroupStatusCreating), - Target: enum.Slice(types.KxScalingGroupStatusActive), - Refresh: statusKxScalingGroup(ctx, conn, id), - Timeout: timeout, - NotFoundChecks: 20, - ContinuousTargetOccurence: 2, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - if out, ok := outputRaw.(*finspace.GetKxScalingGroupOutput); ok { - return out, err - } - - return nil, err -} - -func statusKxScalingGroup(ctx context.Context, conn *finspace.Client, id string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - out, err := findKxScalingGroupById(ctx, conn, id) - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return out, string(out.Status), nil - } -} - func resourceKxScalingGroupRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).FinSpaceClient(ctx) - out, err := findKxScalingGroupById(ctx, conn, d.Id()) + out, err := FindKxScalingGroupById(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] FinSpace KxScalingGroup (%s) not found, removing from state", d.Id()) d.SetId("") @@ -246,7 +213,7 @@ func resourceKxScalingGroupDelete(ctx context.Context, d *schema.ResourceData, m return diags } -func findKxScalingGroupById(ctx context.Context, conn *finspace.Client, id string) (*finspace.GetKxScalingGroupOutput, error) { +func FindKxScalingGroupById(ctx context.Context, conn *finspace.Client, id string) (*finspace.GetKxScalingGroupOutput, error) { parts, err := flex.ExpandResourceId(id, kxScalingGroupIDPartCount, false) if err != nil { return nil, err @@ -275,6 +242,24 @@ func findKxScalingGroupById(ctx context.Context, conn *finspace.Client, id strin return out, nil } +func waitKxScalingGroupCreated(ctx context.Context, conn *finspace.Client, id string, timeout time.Duration) (*finspace.GetKxScalingGroupOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(types.KxScalingGroupStatusCreating), + Target: enum.Slice(types.KxScalingGroupStatusActive), + Refresh: statusKxScalingGroup(ctx, conn, id), + Timeout: timeout, + NotFoundChecks: 20, + ContinuousTargetOccurence: 2, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*finspace.GetKxScalingGroupOutput); ok { + return out, err + } + + return nil, err +} + func waitKxScalingGroupDeleted(ctx context.Context, conn *finspace.Client, id string, timeout time.Duration) (*finspace.GetKxScalingGroupOutput, error) { stateConf := &retry.StateChangeConf{ Pending: enum.Slice(types.KxScalingGroupStatusDeleting), @@ -290,3 +275,18 @@ func waitKxScalingGroupDeleted(ctx context.Context, conn *finspace.Client, id st return nil, err } + +func statusKxScalingGroup(ctx context.Context, conn *finspace.Client, id string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + out, err := FindKxScalingGroupById(ctx, conn, id) + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return out, string(out.Status), nil + } +} From 2b70bb5274915e6bf3fab984054e7544663e742e Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Wed, 13 Dec 2023 20:24:29 -0500 Subject: [PATCH 029/123] r/aws_finspace_kx_scaling_group(test): 
prefer finder in test check func, fmt config --- .../service/finspace/kx_scaling_group_test.go | 94 +++++++++---------- 1 file changed, 43 insertions(+), 51 deletions(-) diff --git a/internal/service/finspace/kx_scaling_group_test.go b/internal/service/finspace/kx_scaling_group_test.go index 9730ce94184..24a8a16ed35 100644 --- a/internal/service/finspace/kx_scaling_group_test.go +++ b/internal/service/finspace/kx_scaling_group_test.go @@ -9,7 +9,6 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/finspace" "github.com/aws/aws-sdk-go-v2/service/finspace/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" @@ -28,7 +27,7 @@ func TestAccFinSpaceKxScalingGroup_basic(t *testing.T) { } ctx := acctest.Context(t) - var KxScalingGroup finspace.GetKxScalingGroupOutput + var scalingGroup finspace.GetKxScalingGroupOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_finspace_kx_scaling_group.test" @@ -44,7 +43,7 @@ func TestAccFinSpaceKxScalingGroup_basic(t *testing.T) { { Config: testAccKxScalingGroupConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckKxScalingGroupExists(ctx, resourceName, &KxScalingGroup), + testAccCheckKxScalingGroupExists(ctx, resourceName, &scalingGroup), resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "status", string(types.KxScalingGroupStatusActive)), ), @@ -64,7 +63,7 @@ func TestAccFinSpaceKxScalingGroup_disappears(t *testing.T) { } ctx := acctest.Context(t) - var KxScalingGroup finspace.GetKxScalingGroupOutput + var scalingGroup finspace.GetKxScalingGroupOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_finspace_kx_scaling_group.test" @@ -80,7 +79,7 @@ func TestAccFinSpaceKxScalingGroup_disappears(t *testing.T) { { Config: testAccKxScalingGroupConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckKxScalingGroupExists(ctx, resourceName, &KxScalingGroup), + testAccCheckKxScalingGroupExists(ctx, resourceName, &scalingGroup), acctest.CheckResourceDisappears(ctx, acctest.Provider, tffinspace.ResourceKxScalingGroup(), resourceName), ), ExpectNonEmptyPlan: true, @@ -98,11 +97,7 @@ func testAccCheckKxScalingGroupDestroy(ctx context.Context) resource.TestCheckFu continue } - input := &finspace.GetKxScalingGroupInput{ - ScalingGroupName: aws.String(rs.Primary.Attributes["name"]), - EnvironmentId: aws.String(rs.Primary.Attributes["environment_id"]), - } - _, err := conn.GetKxScalingGroup(ctx, input) + _, err := tffinspace.FindKxScalingGroupById(ctx, conn, rs.Primary.ID) if err != nil { var nfe *types.ResourceNotFoundException if errors.As(err, &nfe) { @@ -118,7 +113,7 @@ func testAccCheckKxScalingGroupDestroy(ctx context.Context) resource.TestCheckFu } } -func testAccCheckKxScalingGroupExists(ctx context.Context, name string, KxScalingGroup *finspace.GetKxScalingGroupOutput) resource.TestCheckFunc { +func testAccCheckKxScalingGroupExists(ctx context.Context, name string, scalingGroup *finspace.GetKxScalingGroupOutput) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[name] if !ok { @@ -130,16 +125,13 @@ func testAccCheckKxScalingGroupExists(ctx context.Context, name string, KxScalin } conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) - resp, err := conn.GetKxScalingGroup(ctx, &finspace.GetKxScalingGroupInput{ - ScalingGroupName: 
aws.String(rs.Primary.Attributes["name"]), - EnvironmentId: aws.String(rs.Primary.Attributes["environment_id"]), - }) + resp, err := tffinspace.FindKxScalingGroupById(ctx, conn, rs.Primary.ID) if err != nil { return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxScalingGroup, rs.Primary.ID, err) } - *KxScalingGroup = *resp + *scalingGroup = *resp return nil } @@ -166,45 +158,45 @@ resource "aws_finspace_kx_environment" "test" { data "aws_iam_policy_document" "key_policy" { statement { actions = [ - "kms:Decrypt", - "kms:GenerateDataKey" - ] - + "kms:Decrypt", + "kms:GenerateDataKey" + ] + resources = [ - aws_kms_key.test.arn, + aws_kms_key.test.arn, ] - - principals { - type = "Service" - identifiers = ["finspace.amazonaws.com"] - } - - condition { - test = "ArnLike" - variable = "aws:SourceArn" - values = ["${aws_finspace_kx_environment.test.arn}/*"] - } - - condition { - test = "StringEquals" - variable = "aws:SourceAccount" - values = [data.aws_caller_identity.current.account_id] - } + + principals { + type = "Service" + identifiers = ["finspace.amazonaws.com"] + } + + condition { + test = "ArnLike" + variable = "aws:SourceArn" + values = ["${aws_finspace_kx_environment.test.arn}/*"] + } + + condition { + test = "StringEquals" + variable = "aws:SourceAccount" + values = [data.aws_caller_identity.current.account_id] + } } - + statement { - actions = [ - "kms:*", - ] - + actions = [ + "kms:*", + ] + resources = [ - "*", + "*", ] - + principals { - type = "AWS" - identifiers = ["arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:root"] - } + type = "AWS" + identifiers = ["arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:root"] + } } } @@ -229,15 +221,15 @@ resource "aws_security_group" "test" { vpc_id = aws_vpc.test.id ingress { - from_port = 0 - to_port = 0 + from_port = 0 + to_port = 0 protocol = "-1" cidr_blocks = ["0.0.0.0/0"] } egress { from_port = 0 - to_port = 0 + to_port = 0 protocol = "-1" cidr_blocks = ["0.0.0.0/0"] } From 20725a168a75604c1e3e6bbd7d7aa66ba8698f0a Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Wed, 13 Dec 2023 20:27:43 -0500 Subject: [PATCH 030/123] r/aws_finspace_kx_scaling_group(doc): fmt config --- .../r/finspace_kx_scaling_group.html.markdown | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/website/docs/r/finspace_kx_scaling_group.html.markdown b/website/docs/r/finspace_kx_scaling_group.html.markdown index 07eb9b2ced0..c4f34b5b600 100644 --- a/website/docs/r/finspace_kx_scaling_group.html.markdown +++ b/website/docs/r/finspace_kx_scaling_group.html.markdown @@ -15,10 +15,10 @@ Terraform resource for managing an AWS FinSpace Kx Scaling Group. ### Basic Usage ```terraform -resource "aws_finspace_kx_scaling_group" "test" { - name = "my-tf-kx-scalinggroup" - environment_id = aws_finspace_kx_environment.example.id - availability_zone_id = "use1-az2" +resource "aws_finspace_kx_scaling_group" "example" { + name = "my-tf-kx-scalinggroup" + environment_id = aws_finspace_kx_environment.example.id + availability_zone_id = "use1-az2" host_type = "kx.sg.4xlarge" } ``` @@ -45,14 +45,14 @@ This resource exports the following attributes in addition to the arguments abov * `created_timestamp` - The timestamp at which the scaling group was created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000. 
* `last_modified_timestamp` - Last timestamp at which the scaling group was updated in FinSpace. Value determined as epoch time in seconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000. * `status` - The status of scaling group. - * CREATING – The scaling group creation is in progress. - * CREATE_FAILED – The scaling group creation has failed. - * ACTIVE – The scaling group is active. - * UPDATING – The scaling group is in the process of being updated. - * UPDATE_FAILED – The update action failed. - * DELETING – The scaling group is in the process of being deleted. - * DELETE_FAILED – The system failed to delete the scaling group. - * DELETED – The scaling group is successfully deleted. + * `CREATING` – The scaling group creation is in progress. + * `CREATE_FAILED` – The scaling group creation has failed. + * `ACTIVE` – The scaling group is active. + * `UPDATING` – The scaling group is in the process of being updated. + * `UPDATE_FAILED` – The update action failed. + * `DELETING` – The scaling group is in the process of being deleted. + * `DELETE_FAILED` – The system failed to delete the scaling group. + * `DELETED` – The scaling group is successfully deleted. * `status_reason` - The error message when a failed state occurs. * `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block). From 6eb984ca12c687cf78e455f179755ffca5c61333 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Wed, 13 Dec 2023 20:29:44 -0500 Subject: [PATCH 031/123] r/aws_finspace_kx_scaling_group(test): terrafmt take 2 --- internal/service/finspace/kx_scaling_group_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/internal/service/finspace/kx_scaling_group_test.go b/internal/service/finspace/kx_scaling_group_test.go index 24a8a16ed35..6ef696fda1e 100644 --- a/internal/service/finspace/kx_scaling_group_test.go +++ b/internal/service/finspace/kx_scaling_group_test.go @@ -162,9 +162,9 @@ data "aws_iam_policy_document" "key_policy" { "kms:GenerateDataKey" ] - resources = [ + resources = [ aws_kms_key.test.arn, - ] + ] principals { type = "Service" @@ -199,7 +199,7 @@ data "aws_iam_policy_document" "key_policy" { } } } - + resource "aws_kms_key_policy" "test" { key_id = aws_kms_key.test.id policy = data.aws_iam_policy_document.key_policy.json From 900a3fc56612d0137f2071cfd8d2db58963f8479 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Wed, 13 Dec 2023 20:38:27 -0500 Subject: [PATCH 032/123] r/aws_finspace_kx_scaling_group: prefer create.AppendDiagError --- internal/service/finspace/kx_scaling_group.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/internal/service/finspace/kx_scaling_group.go b/internal/service/finspace/kx_scaling_group.go index c6248b4409e..852fed8de75 100644 --- a/internal/service/finspace/kx_scaling_group.go +++ b/internal/service/finspace/kx_scaling_group.go @@ -120,7 +120,7 @@ func resourceKxScalingGroupCreate(ctx context.Context, d *schema.ResourceData, m } rID, err := flex.FlattenResourceId(idParts, kxScalingGroupIDPartCount, false) if err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionFlatteningResourceId, ResNameKxScalingGroup, d.Get("name").(string), err)...) 
+ return create.AppendDiagError(diags, names.FinSpace, create.ErrActionFlatteningResourceId, ResNameKxScalingGroup, d.Get("name").(string), err) } d.SetId(rID) @@ -134,15 +134,15 @@ func resourceKxScalingGroupCreate(ctx context.Context, d *schema.ResourceData, m out, err := conn.CreateKxScalingGroup(ctx, in) if err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionCreating, ResNameKxScalingGroup, d.Get("name").(string), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionCreating, ResNameKxScalingGroup, d.Get("name").(string), err) } if out == nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionCreating, ResNameKxScalingGroup, d.Get("name").(string), errors.New("empty output"))...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionCreating, ResNameKxScalingGroup, d.Get("name").(string), errors.New("empty output")) } if _, err := waitKxScalingGroupCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionWaitingForCreation, ResNameKxScalingGroup, d.Id(), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionWaitingForCreation, ResNameKxScalingGroup, d.Id(), err) } return append(diags, resourceKxScalingGroupRead(ctx, d, meta)...) @@ -160,7 +160,7 @@ func resourceKxScalingGroupRead(ctx context.Context, d *schema.ResourceData, met } if err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionReading, ResNameKxScalingGroup, d.Id(), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionReading, ResNameKxScalingGroup, d.Id(), err) } d.Set("arn", out.ScalingGroupArn) d.Set("status", out.Status) @@ -174,7 +174,7 @@ func resourceKxScalingGroupRead(ctx context.Context, d *schema.ResourceData, met parts, err := flex.ExpandResourceId(d.Id(), kxUserIDPartCount, false) if err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionSetting, ResNameKxScalingGroup, d.Id(), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionSetting, ResNameKxScalingGroup, d.Id(), err) } d.Set("environment_id", parts[0]) @@ -202,12 +202,12 @@ func resourceKxScalingGroupDelete(ctx context.Context, d *schema.ResourceData, m return diags } - return append(diags, create.DiagError(names.FinSpace, create.ErrActionDeleting, ResNameKxScalingGroup, d.Id(), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionDeleting, ResNameKxScalingGroup, d.Id(), err) } _, err = waitKxScalingGroupDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)) if err != nil && !tfresource.NotFound(err) { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionWaitingForDeletion, ResNameKxScalingGroup, d.Id(), err)...) 
+ return create.AppendDiagError(diags, names.FinSpace, create.ErrActionWaitingForDeletion, ResNameKxScalingGroup, d.Id(), err) } return diags From 4a6992ca6189a78efc98abe8b69fb98509855542 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 14 Dec 2023 06:20:09 +0000 Subject: [PATCH 033/123] build(deps): bump the aws-sdk-go group with 3 updates Bumps the aws-sdk-go group with 3 updates: [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go), [github.com/aws/aws-sdk-go-v2/service/emrserverless](https://github.com/aws/aws-sdk-go-v2) and [github.com/aws/aws-sdk-go-v2/service/pipes](https://github.com/aws/aws-sdk-go-v2). Updates `github.com/aws/aws-sdk-go` from 1.49.1 to 1.49.2 - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.49.1...v1.49.2) Updates `github.com/aws/aws-sdk-go-v2/service/emrserverless` from 1.14.5 to 1.14.6 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/service/mq/v1.14.6/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/mq/v1.14.5...service/mq/v1.14.6) Updates `github.com/aws/aws-sdk-go-v2/service/pipes` from 1.9.5 to 1.9.6 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/m2/v1.9.5...service/pipes/v1.9.6) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/emrserverless dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/pipes dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go ... 
Signed-off-by: dependabot[bot] --- go.mod | 6 +++--- go.sum | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index 3821b9ff612..553689a5b38 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.20 require ( github.com/ProtonMail/go-crypto v0.0.0-20230923063757-afb1ddc0824c github.com/YakDriver/regexache v0.23.0 - github.com/aws/aws-sdk-go v1.49.1 + github.com/aws/aws-sdk-go v1.49.2 github.com/aws/aws-sdk-go-v2 v1.24.0 github.com/aws/aws-sdk-go-v2/config v1.26.1 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.10 @@ -41,7 +41,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/ecr v1.24.5 github.com/aws/aws-sdk-go-v2/service/eks v1.35.5 github.com/aws/aws-sdk-go-v2/service/emr v1.35.5 - github.com/aws/aws-sdk-go-v2/service/emrserverless v1.14.5 + github.com/aws/aws-sdk-go-v2/service/emrserverless v1.14.6 github.com/aws/aws-sdk-go-v2/service/evidently v1.16.5 github.com/aws/aws-sdk-go-v2/service/finspace v1.20.0 github.com/aws/aws-sdk-go-v2/service/fis v1.21.5 @@ -65,7 +65,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/oam v1.7.5 github.com/aws/aws-sdk-go-v2/service/opensearchserverless v1.9.5 github.com/aws/aws-sdk-go-v2/service/osis v1.6.5 - github.com/aws/aws-sdk-go-v2/service/pipes v1.9.5 + github.com/aws/aws-sdk-go-v2/service/pipes v1.9.6 github.com/aws/aws-sdk-go-v2/service/polly v1.36.5 github.com/aws/aws-sdk-go-v2/service/pricing v1.24.5 github.com/aws/aws-sdk-go-v2/service/qldb v1.19.5 diff --git a/go.sum b/go.sum index 002e8c2e279..1e073c94d70 100644 --- a/go.sum +++ b/go.sum @@ -22,8 +22,8 @@ github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmms github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/aws/aws-sdk-go v1.49.1 h1:Dsamcd8d/nNb3A+bZ0ucfGl0vGZsW5wlRW0vhoYGoeQ= -github.com/aws/aws-sdk-go v1.49.1/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go v1.49.2 h1:+4BEcm1nPCoDbVd+gg8cdxpa1qJfrvnddy12vpEVWjw= +github.com/aws/aws-sdk-go v1.49.2/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/aws/aws-sdk-go-v2 v1.24.0 h1:890+mqQ+hTpNuw0gGP6/4akolQkSToDJgHfQE7AwGuk= github.com/aws/aws-sdk-go-v2 v1.24.0/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4 h1:OCs21ST2LrepDfD3lwlQiOqIGp6JiEUqG84GzTDoyJs= @@ -108,8 +108,8 @@ github.com/aws/aws-sdk-go-v2/service/eks v1.35.5 h1:LEYyWSnfdSSysPr5JWUkNwOD0MvX github.com/aws/aws-sdk-go-v2/service/eks v1.35.5/go.mod h1:L1uv3UgQlAkdM9v0gpec7nnfUiQkCnGMjBE7MJArfWQ= github.com/aws/aws-sdk-go-v2/service/emr v1.35.5 h1:dZtEDpqYVg3i5oT8lSXxEsg6dInewHA3qNuyzHTvWck= github.com/aws/aws-sdk-go-v2/service/emr v1.35.5/go.mod h1:Drh6y2qLaw/wnDKTIcdqM2m358MIRXsZ2Bj2tjhVLq0= -github.com/aws/aws-sdk-go-v2/service/emrserverless v1.14.5 h1:IsLomor7ErBzqMCtI71gqTw0ENKbZxVhHMwSnDImbTw= -github.com/aws/aws-sdk-go-v2/service/emrserverless v1.14.5/go.mod h1:G2r5cqojvwkdJJx6NDxszEfHC8f02TF15dE/3bg8P9A= +github.com/aws/aws-sdk-go-v2/service/emrserverless v1.14.6 h1:O2ppygCppB40GS7lDJUX4dGEgEdsKkX62oIAGgre/rY= +github.com/aws/aws-sdk-go-v2/service/emrserverless v1.14.6/go.mod 
h1:G2r5cqojvwkdJJx6NDxszEfHC8f02TF15dE/3bg8P9A= github.com/aws/aws-sdk-go-v2/service/evidently v1.16.5 h1:qMMMld3RbqxSZ5KEokAu+w4MGV9YlSvisJbk4iMO4m0= github.com/aws/aws-sdk-go-v2/service/evidently v1.16.5/go.mod h1:ydI4dfZIWil2hOsneE1QWDOxY/CdC37oT96S4JOrD24= github.com/aws/aws-sdk-go-v2/service/finspace v1.20.0 h1:n3TWZAn4gV2/GiJMnuNuSEkgyXHkKPEkenU5ZmmFS1o= @@ -168,8 +168,8 @@ github.com/aws/aws-sdk-go-v2/service/opensearchserverless v1.9.5 h1:V+zBQiUAATdw github.com/aws/aws-sdk-go-v2/service/opensearchserverless v1.9.5/go.mod h1:Hky91JAG7y6hJrIoZ6IyJlB99+AFOPUIfqVQcZ+fbhY= github.com/aws/aws-sdk-go-v2/service/osis v1.6.5 h1:u0FL7wY1ni4WQkpfUiBslPmwKOltziQkGg5njTpPH6M= github.com/aws/aws-sdk-go-v2/service/osis v1.6.5/go.mod h1:wRTpbH8h5d4SJmdsy9LNEuZNHrNtUCZMl+U1slAW4Ng= -github.com/aws/aws-sdk-go-v2/service/pipes v1.9.5 h1:BKJlKvRxWQCjd7UyZPLlvkvBDOf7UziF5spBSkMq3J4= -github.com/aws/aws-sdk-go-v2/service/pipes v1.9.5/go.mod h1:N3pAD/7GiKZAOBFFsF9BqWdSg33HM8ibXoAyPQXgcNI= +github.com/aws/aws-sdk-go-v2/service/pipes v1.9.6 h1:cDjJ1OsUDDHP0DERFe+kon0awE0vMt+6xjd9zuOaOv8= +github.com/aws/aws-sdk-go-v2/service/pipes v1.9.6/go.mod h1:N3pAD/7GiKZAOBFFsF9BqWdSg33HM8ibXoAyPQXgcNI= github.com/aws/aws-sdk-go-v2/service/polly v1.36.5 h1:/BHypWAWPEuwfnlb4hJz5R1uedDGNtorZgEHYtW/wI4= github.com/aws/aws-sdk-go-v2/service/polly v1.36.5/go.mod h1:mmQzyk89+rKEfieMV8gHoFoVmrPiyKjqORj2Uk5+O04= github.com/aws/aws-sdk-go-v2/service/pricing v1.24.5 h1:yJniPHxzGy0jtJNkXYTqI8ps587kl1Jf8Luz5K8Jxjs= From 1a6b15833e9a3b627dbf15f9681b186a22b95670 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 14 Dec 2023 06:38:03 +0000 Subject: [PATCH 034/123] build(deps): bump github.com/aws/aws-sdk-go in /.ci/providerlint Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.49.1 to 1.49.2. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.49.1...v1.49.2) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .ci/providerlint/go.mod | 2 +- .ci/providerlint/go.sum | 4 ++-- .../aws/aws-sdk-go/aws/endpoints/defaults.go | 13 +++++++++++++ .ci/providerlint/vendor/modules.txt | 2 +- 4 files changed, 17 insertions(+), 4 deletions(-) diff --git a/.ci/providerlint/go.mod b/.ci/providerlint/go.mod index a4d456a3715..8d6db90f68b 100644 --- a/.ci/providerlint/go.mod +++ b/.ci/providerlint/go.mod @@ -3,7 +3,7 @@ module github.com/hashicorp/terraform-provider-aws/ci/providerlint go 1.20 require ( - github.com/aws/aws-sdk-go v1.49.1 + github.com/aws/aws-sdk-go v1.49.2 github.com/bflad/tfproviderlint v0.29.0 github.com/hashicorp/terraform-plugin-sdk/v2 v2.30.0 golang.org/x/tools v0.13.0 diff --git a/.ci/providerlint/go.sum b/.ci/providerlint/go.sum index 7375ababe78..8755e6de2fb 100644 --- a/.ci/providerlint/go.sum +++ b/.ci/providerlint/go.sum @@ -8,8 +8,8 @@ github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= -github.com/aws/aws-sdk-go v1.49.1 h1:Dsamcd8d/nNb3A+bZ0ucfGl0vGZsW5wlRW0vhoYGoeQ= -github.com/aws/aws-sdk-go v1.49.1/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go v1.49.2 h1:+4BEcm1nPCoDbVd+gg8cdxpa1qJfrvnddy12vpEVWjw= +github.com/aws/aws-sdk-go v1.49.2/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/bflad/gopaniccheck v0.1.0 h1:tJftp+bv42ouERmUMWLoUn/5bi/iQZjHPznM00cP/bU= github.com/bflad/gopaniccheck v0.1.0/go.mod h1:ZCj2vSr7EqVeDaqVsWN4n2MwdROx1YL+LFo47TSWtsA= github.com/bflad/tfproviderlint v0.29.0 h1:zxKYAAM6IZ4ace1a3LX+uzMRIMP8L+iOtEc+FP2Yoow= diff --git a/.ci/providerlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/.ci/providerlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index b3d8f8c2c94..41a2711656a 100644 --- a/.ci/providerlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/.ci/providerlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -12007,6 +12007,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -34775,6 +34778,16 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "pipes": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "polly": service{ Endpoints: serviceEndpoints{ endpointKey{ diff --git a/.ci/providerlint/vendor/modules.txt b/.ci/providerlint/vendor/modules.txt index d681c129326..3b328cf28be 100644 --- a/.ci/providerlint/vendor/modules.txt +++ b/.ci/providerlint/vendor/modules.txt @@ -24,7 +24,7 @@ github.com/agext/levenshtein # github.com/apparentlymart/go-textseg/v15 v15.0.0 ## explicit; go 1.16 github.com/apparentlymart/go-textseg/v15/textseg -# github.com/aws/aws-sdk-go v1.49.1 +# github.com/aws/aws-sdk-go v1.49.2 ## explicit; go 1.19 github.com/aws/aws-sdk-go/aws/awserr github.com/aws/aws-sdk-go/aws/endpoints From 1e55ca64631adacc50e347892f3ee6ee2b247cea Mon Sep 17 00:00:00 2001 From: changelogbot Date: Thu, 14 Dec 2023 12:54:15 +0000 Subject: [PATCH 035/123] Update 
CHANGELOG.md for #34922 --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 52e14127080..65f9cccaf94 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ FEATURES: * **New Data Source:** `aws_ssoadmin_application_assignments` ([#34796](https://github.com/hashicorp/terraform-provider-aws/issues/34796)) * **New Data Source:** `aws_ssoadmin_principal_application_assignments` ([#34815](https://github.com/hashicorp/terraform-provider-aws/issues/34815)) +* **New Resource:** `aws_ssoadmin_trusted_token_issuer` ([#34839](https://github.com/hashicorp/terraform-provider-aws/issues/34839)) ENHANCEMENTS: @@ -30,6 +31,7 @@ BUG FIXES: * resource/aws_cloudwatch_log_group: Fix `invalid new value for .skip_destroy: was cty.False, but now null` errors ([#30354](https://github.com/hashicorp/terraform-provider-aws/issues/30354)) * resource/aws_cloudwatch_log_group: Remove default value (`STANDARD`) for `log_group_class` argument and mark as Computed. This fixes `InvalidParameterException: Only Standard log class is supported` errors in AWS Regions other than AWS Commercial ([#34812](https://github.com/hashicorp/terraform-provider-aws/issues/34812)) * resource/aws_db_instance: Fix error where Terraform loses track of resource if Blue/Green Deployment is applied outside of Terraform ([#34728](https://github.com/hashicorp/terraform-provider-aws/issues/34728)) +* resource/aws_dms_event_subscription: `source_ids` and `source_type` are Required ([#33731](https://github.com/hashicorp/terraform-provider-aws/issues/33731)) * resource/aws_ecr_pull_through_cache_rule: Fix plan time validation for `ecr_repository_prefix` ([#34716](https://github.com/hashicorp/terraform-provider-aws/issues/34716)) * resource/aws_lb: Correct in-place update of `security_groups` for Network Load Balancers when the new value is Computed ([#33205](https://github.com/hashicorp/terraform-provider-aws/issues/33205)) * resource/aws_lb: Fix `InvalidConfigurationRequest: Load balancer attribute key 'dns_record.client_routing_policy' is not supported on load balancers with type 'network'` errors on resource Create in AWS GovCloud (US) ([#34135](https://github.com/hashicorp/terraform-provider-aws/issues/34135)) From de5e8e5487d3f5a4fbbf6ceb102006eca42be4c7 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 09:17:57 -0500 Subject: [PATCH 036/123] d/aws_polly_voices: new data source (#34916) This data source will allow practitioners to list available voices from AWS Polly. 
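A minimal usage sketch, mirroring the examples in the documentation added by this change (the `language_code` value and the `voices`/`name` attribute names come from the new schema; illustrative only):

    data "aws_polly_voices" "example" {
      language_code = "en-US"
    }

    # List the names of the voices returned for the selected language.
    output "voice_names" {
      value = [for v in data.aws_polly_voices.example.voices : v.name]
    }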
--- .changelog/34916.txt | 3 + internal/service/polly/service_package_gen.go | 7 +- internal/service/polly/voices_data_source.go | 153 ++++++++++++++++++ .../service/polly/voices_data_source_test.go | 84 ++++++++++ names/names.go | 1 + website/docs/d/polly_voices.html.markdown | 54 +++++++ 6 files changed, 301 insertions(+), 1 deletion(-) create mode 100644 .changelog/34916.txt create mode 100644 internal/service/polly/voices_data_source.go create mode 100644 internal/service/polly/voices_data_source_test.go create mode 100644 website/docs/d/polly_voices.html.markdown diff --git a/.changelog/34916.txt b/.changelog/34916.txt new file mode 100644 index 00000000000..9c96cfacdd4 --- /dev/null +++ b/.changelog/34916.txt @@ -0,0 +1,3 @@ +```release-note:new-data-source +aws_polly_voices +``` diff --git a/internal/service/polly/service_package_gen.go b/internal/service/polly/service_package_gen.go index 43fe19efee8..4391a3cc545 100644 --- a/internal/service/polly/service_package_gen.go +++ b/internal/service/polly/service_package_gen.go @@ -15,7 +15,12 @@ import ( type servicePackage struct{} func (p *servicePackage) FrameworkDataSources(ctx context.Context) []*types.ServicePackageFrameworkDataSource { - return []*types.ServicePackageFrameworkDataSource{} + return []*types.ServicePackageFrameworkDataSource{ + { + Factory: newDataSourceVoices, + Name: "Voices", + }, + } } func (p *servicePackage) FrameworkResources(ctx context.Context) []*types.ServicePackageFrameworkResource { diff --git a/internal/service/polly/voices_data_source.go b/internal/service/polly/voices_data_source.go new file mode 100644 index 00000000000..1274c5ba585 --- /dev/null +++ b/internal/service/polly/voices_data_source.go @@ -0,0 +1,153 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package polly + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/service/polly" + awstypes "github.com/aws/aws-sdk-go-v2/service/polly/types" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkDataSource(name="Voices") +func newDataSourceVoices(context.Context) (datasource.DataSourceWithConfigure, error) { + return &dataSourceVoices{}, nil +} + +const ( + DSNameVoices = "Voices Data Source" +) + +type dataSourceVoices struct { + framework.DataSourceWithConfigure +} + +func (d *dataSourceVoices) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { // nosemgrep:ci.meta-in-func-name + resp.TypeName = "aws_polly_voices" +} + +func (d *dataSourceVoices) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "engine": schema.StringAttribute{ + CustomType: fwtypes.StringEnumType[awstypes.Engine](), + Optional: true, + }, + "id": framework.IDAttribute(), + "include_additional_language_codes": schema.BoolAttribute{ + Optional: true, + }, + "language_code": schema.StringAttribute{ + CustomType: fwtypes.StringEnumType[awstypes.LanguageCode](), + 
Optional: true, + }, + }, + Blocks: map[string]schema.Block{ + "voices": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[voicesData](ctx), + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "additional_language_codes": schema.ListAttribute{ + CustomType: fwtypes.ListOfStringType, + ElementType: types.StringType, + Computed: true, + }, + "gender": schema.StringAttribute{ + Computed: true, + }, + "id": schema.StringAttribute{ + Computed: true, + }, + "language_code": schema.StringAttribute{ + Computed: true, + }, + "language_name": schema.StringAttribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "supported_engines": schema.ListAttribute{ + CustomType: fwtypes.ListOfStringType, + ElementType: types.StringType, + Computed: true, + }, + }, + }, + }, + }, + } +} +func (d *dataSourceVoices) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + conn := d.Meta().PollyClient(ctx) + + var data dataSourceVoicesData + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + data.ID = types.StringValue(d.Meta().AccountID) + + input := &polly.DescribeVoicesInput{} + resp.Diagnostics.Append(flex.Expand(ctx, data, input)...) + if resp.Diagnostics.HasError() { + return + } + + // No paginator helper so pagination must be done manually + out := &polly.DescribeVoicesOutput{} + for { + page, err := conn.DescribeVoices(ctx, input) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.Polly, create.ErrActionReading, DSNameVoices, data.ID.String(), err), + err.Error(), + ) + return + } + + if page == nil { + break + } + + if len(page.Voices) > 0 { + out.Voices = append(out.Voices, page.Voices...) + } + + input.NextToken = page.NextToken + if page.NextToken == nil { + break + } + } + + resp.Diagnostics.Append(flex.Flatten(ctx, out, &data)...) + + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) +} + +type dataSourceVoicesData struct { + Engine fwtypes.StringEnum[awstypes.Engine] `tfsdk:"engine"` + ID types.String `tfsdk:"id"` + IncludeAdditionalLanguageCodes types.Bool `tfsdk:"include_additional_language_codes"` + LanguageCode fwtypes.StringEnum[awstypes.LanguageCode] `tfsdk:"language_code"` + Voices fwtypes.ListNestedObjectValueOf[voicesData] `tfsdk:"voices"` +} + +type voicesData struct { + AdditionalLanguageCodes fwtypes.ListValueOf[types.String] `tfsdk:"additional_language_codes"` + Gender types.String `tfsdk:"gender"` + ID types.String `tfsdk:"id"` + LanguageCode types.String `tfsdk:"language_code"` + LanguageName types.String `tfsdk:"language_name"` + Name types.String `tfsdk:"name"` + SupportedEngines fwtypes.ListValueOf[types.String] `tfsdk:"supported_engines"` +} diff --git a/internal/service/polly/voices_data_source_test.go b/internal/service/polly/voices_data_source_test.go new file mode 100644 index 00000000000..21eda739a7a --- /dev/null +++ b/internal/service/polly/voices_data_source_test.go @@ -0,0 +1,84 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package polly_test + +import ( + "fmt" + "testing" + + "github.com/aws/aws-sdk-go-v2/service/polly/types" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccPollyVoicesDataSource_basic(t *testing.T) { + ctx := acctest.Context(t) + dataSourceName := "data.aws_polly_voices.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.PollyEndpointID) + }, + ErrorCheck: acctest.ErrorCheck(t, names.PollyEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: nil, + Steps: []resource.TestStep{ + { + Config: testAccVoicesDataSourceConfig_basic(), + Check: resource.ComposeTestCheckFunc( + // verify a known voice is returned in the results + resource.TestCheckTypeSetElemNestedAttrs(dataSourceName, "voices.*", map[string]string{ + "gender": "Female", + "language_code": "en-US", + "name": "Kendra", + }), + ), + }, + }, + }) +} + +func TestAccPollyVoicesDataSource_languageCode(t *testing.T) { + ctx := acctest.Context(t) + dataSourceName := "data.aws_polly_voices.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.PollyEndpointID) + }, + ErrorCheck: acctest.ErrorCheck(t, names.PollyEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: nil, + Steps: []resource.TestStep{ + { + Config: testAccVoicesDataSourceConfig_languageCode(string(types.LanguageCodeEnUs)), + Check: resource.ComposeTestCheckFunc( + // verify a known voice is returned in the results + resource.TestCheckTypeSetElemNestedAttrs(dataSourceName, "voices.*", map[string]string{ + "gender": "Female", + "language_code": "en-US", + "name": "Kendra", + }), + ), + }, + }, + }) +} + +func testAccVoicesDataSourceConfig_basic() string { + return ` +data "aws_polly_voices" "test" {} +` +} + +func testAccVoicesDataSourceConfig_languageCode(languageCode string) string { + return fmt.Sprintf(` +data "aws_polly_voices" "test" { + language_code = %[1]q +} +`, languageCode) +} diff --git a/names/names.go b/names/names.go index 7eaf6c896fe..30dd6320c9d 100644 --- a/names/names.go +++ b/names/names.go @@ -66,6 +66,7 @@ const ( ObservabilityAccessManagerEndpointID = "oam" OpenSearchServerlessEndpointID = "aoss" PipesEndpointID = "pipes" + PollyEndpointID = "polly" PricingEndpointID = "pricing" QLDBEndpointID = "qldb" RedshiftDataEndpointID = "redshift-data" diff --git a/website/docs/d/polly_voices.html.markdown b/website/docs/d/polly_voices.html.markdown new file mode 100644 index 00000000000..d9f73c2573a --- /dev/null +++ b/website/docs/d/polly_voices.html.markdown @@ -0,0 +1,54 @@ +--- +subcategory: "Polly" +layout: "aws" +page_title: "AWS: aws_polly_voices" +description: |- + Terraform data source for managing an AWS Polly Voices. +--- + +# Data Source: aws_polly_voices + +Terraform data source for managing an AWS Polly Voices. + +## Example Usage + +### Basic Usage + +```terraform +data "aws_polly_voices" "example" {} +``` + +### With Language Code + +```terraform +data "aws_polly_voices" "example" { + language_code = "en-GB" +} +``` + +## Argument Reference + +The following arguments are optional: + +* `engine` - (Optional) Engine used by Amazon Polly when processing input text for speech synthesis. 
Valid values are `standard`, `neural`, and `long-form`. +* `include_additional_language_codes` - (Optional) Whether to return any bilingual voices that use the specified language as an additional language. +* `language_code` - (Optional) Language identification tag for filtering the list of voices returned. If not specified, all available voices are returned. + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `id` - AWS account ID. +* `voices` - List of voices with their properties. See [`voices` Attribute Reference](#voices-attribute-reference) below. + +### `voices` Attribute Reference + +See the [AWS Polly Voice documentation](https://docs.aws.amazon.com/polly/latest/dg/API_Voice.html) for additional details. + +* `additional_language_codes` - Additional codes for languages available for the specified voice in addition to its default language. +* `gender` - Gender of the voice. +* `id` - Amazon Polly assigned voice ID. +* `language_code` - Language code of the voice. +* `language_name` - Human readable name of the language in English. +* `name` - Name of the voice. +* `supported_engines` - Specifies which engines are supported by a given voice. From e7a637364376983820c5df30a5377cf94cbf18cf Mon Sep 17 00:00:00 2001 From: changelogbot Date: Thu, 14 Dec 2023 14:20:29 +0000 Subject: [PATCH 037/123] Update CHANGELOG.md for #34916 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 65f9cccaf94..fce62da4221 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,7 @@ FEATURES: +* **New Data Source:** `aws_polly_voices` ([#34916](https://github.com/hashicorp/terraform-provider-aws/issues/34916)) * **New Data Source:** `aws_ssoadmin_application_assignments` ([#34796](https://github.com/hashicorp/terraform-provider-aws/issues/34796)) * **New Data Source:** `aws_ssoadmin_principal_application_assignments` ([#34815](https://github.com/hashicorp/terraform-provider-aws/issues/34815)) * **New Resource:** `aws_ssoadmin_trusted_token_issuer` ([#34839](https://github.com/hashicorp/terraform-provider-aws/issues/34839)) From b1db62e1f396f61a056e39583a0eeb6f113f42b0 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 09:25:10 -0500 Subject: [PATCH 038/123] r/aws_finspace_kx_scaling_group(test): add _tags --- .../service/finspace/kx_scaling_group_test.go | 91 +++++++++++++++++++ 1 file changed, 91 insertions(+) diff --git a/internal/service/finspace/kx_scaling_group_test.go b/internal/service/finspace/kx_scaling_group_test.go index 6ef696fda1e..b3ee1407893 100644 --- a/internal/service/finspace/kx_scaling_group_test.go +++ b/internal/service/finspace/kx_scaling_group_test.go @@ -88,6 +88,62 @@ func TestAccFinSpaceKxScalingGroup_disappears(t *testing.T) { }) } +func TestAccFinSpaceKxScalingGroup_tags(t *testing.T) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + ctx := acctest.Context(t) + var scalingGroup finspace.GetKxScalingGroupOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_finspace_kx_scaling_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, finspace.ServiceID) + }, + ErrorCheck: acctest.ErrorCheck(t, finspace.ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckKxScalingGroupDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: 
testAccKxScalingGroupConfig_tags1(rName, "key1", "value1"), + Check: resource.ComposeTestCheckFunc( + testAccCheckKxScalingGroupExists(ctx, resourceName, &scalingGroup), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccKxScalingGroupConfig_tags2(rName, "key1", "value1updated", "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckKxScalingGroupExists(ctx, resourceName, &scalingGroup), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + { + Config: testAccKxScalingGroupConfig_tags1(rName, "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckKxScalingGroupExists(ctx, resourceName, &scalingGroup), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + }, + }) +} + func testAccCheckKxScalingGroupDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) @@ -263,3 +319,38 @@ resource "aws_finspace_kx_scaling_group" "test" { } `, rName)) } + +func testAccKxScalingGroupConfig_tags1(rName, key1, value1 string) string { + return acctest.ConfigCompose( + testAccKxScalingGroupConfigBase(rName), + fmt.Sprintf(` +resource "aws_finspace_kx_scaling_group" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] + host_type = "kx.sg.4xlarge" + + tags = { + %[2]q = %[3]q + } +} +`, rName, key1, value1)) +} + +func testAccKxScalingGroupConfig_tags2(rName, key1, value1, key2, value2 string) string { + return acctest.ConfigCompose( + testAccKxScalingGroupConfigBase(rName), + fmt.Sprintf(` +resource "aws_finspace_kx_scaling_group" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] + host_type = "kx.sg.4xlarge" + + tags = { + %[2]q = %[3]q + %[4]q = %[5]q + } +} +`, rName, key1, value1, key2, value2)) +} From 96399e660de24c132af31bf300cd36dd13b9e910 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 09:35:17 -0500 Subject: [PATCH 039/123] r/aws_finspace_kx_volume: prefer create.AppendDiagError --- internal/service/finspace/kx_volume.go | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/internal/service/finspace/kx_volume.go b/internal/service/finspace/kx_volume.go index 0edbc919ffc..c76aa3b1f7b 100644 --- a/internal/service/finspace/kx_volume.go +++ b/internal/service/finspace/kx_volume.go @@ -177,7 +177,7 @@ func resourceKxVolumeCreate(ctx context.Context, d *schema.ResourceData, meta in } rID, err := flex.FlattenResourceId(idParts, kxVolumeIDPartCount, false) if err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionFlatteningResourceId, ResNameKxVolume, d.Get("name").(string), err)...) 
+ return create.AppendDiagError(diags, names.FinSpace, create.ErrActionFlatteningResourceId, ResNameKxVolume, d.Get("name").(string), err) } d.SetId(rID) @@ -203,21 +203,21 @@ func resourceKxVolumeCreate(ctx context.Context, d *schema.ResourceData, meta in out, err := conn.CreateKxVolume(ctx, in) if err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionCreating, ResNameKxVolume, d.Get("name").(string), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionCreating, ResNameKxVolume, d.Get("name").(string), err) } if out == nil || out.VolumeName == nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionCreating, ResNameKxVolume, d.Get("name").(string), errors.New("empty output"))...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionCreating, ResNameKxVolume, d.Get("name").(string), errors.New("empty output")) } if _, err := waitKxVolumeCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionWaitingForCreation, ResNameKxVolume, d.Id(), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionWaitingForCreation, ResNameKxVolume, d.Id(), err) } // The CreateKxVolume API currently fails to tag the Volume when the // Tags field is set. Until the API is fixed, tag after creation instead. if err := createTags(ctx, conn, aws.ToString(out.VolumeArn), getTagsIn(ctx)); err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionCreating, ResNameKxVolume, d.Id(), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionCreating, ResNameKxVolume, d.Id(), err) } return append(diags, resourceKxVolumeRead(ctx, d, meta)...) @@ -236,7 +236,7 @@ func resourceKxVolumeRead(ctx context.Context, d *schema.ResourceData, meta inte } if err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionReading, ResNameKxVolume, d.Id(), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionReading, ResNameKxVolume, d.Id(), err) } d.Set("arn", out.VolumeArn) @@ -252,16 +252,16 @@ func resourceKxVolumeRead(ctx context.Context, d *schema.ResourceData, meta inte d.Set("availability_zones", aws.StringSlice(out.AvailabilityZoneIds)) if err := d.Set("nas1_configuration", flattenNas1Configuration(out.Nas1Configuration)); err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionSetting, ResNameKxVolume, d.Id(), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionSetting, ResNameKxVolume, d.Id(), err) } if err := d.Set("attached_clusters", flattenAttachedClusters(out.AttachedClusters)); err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionSetting, ResNameKxVolume, d.Id(), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionSetting, ResNameKxVolume, d.Id(), err) } parts, err := flex.ExpandResourceId(d.Id(), kxVolumeIDPartCount, false) if err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionSetting, ResNameKxVolume, d.Id(), err)...) 
+ return create.AppendDiagError(diags, names.FinSpace, create.ErrActionSetting, ResNameKxVolume, d.Id(), err) } d.Set("environment_id", parts[0]) @@ -296,10 +296,10 @@ func resourceKxVolumeUpdate(ctx context.Context, d *schema.ResourceData, meta in log.Printf("[DEBUG] Updating FinSpace KxVolume (%s): %#v", d.Id(), in) if _, err := conn.UpdateKxVolume(ctx, in); err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionUpdating, ResNameKxVolume, d.Id(), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionUpdating, ResNameKxVolume, d.Id(), err) } if _, err := waitKxVolumeUpdated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionUpdating, ResNameKxVolume, d.Id(), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionUpdating, ResNameKxVolume, d.Id(), err) } return append(diags, resourceKxVolumeRead(ctx, d, meta)...) @@ -321,12 +321,12 @@ func resourceKxVolumeDelete(ctx context.Context, d *schema.ResourceData, meta in return diags } - return append(diags, create.DiagError(names.FinSpace, create.ErrActionDeleting, ResNameKxVolume, d.Id(), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionDeleting, ResNameKxVolume, d.Id(), err) } _, err = waitKxVolumeDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)) if err != nil && !tfresource.NotFound(err) { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionWaitingForDeletion, ResNameKxVolume, d.Id(), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionWaitingForDeletion, ResNameKxVolume, d.Id(), err) } return diags From 58ea0a7f9bcfe3225cd19b90a17e96608a7c8de9 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 09:39:11 -0500 Subject: [PATCH 040/123] r/aws_finspace_kx_volume: alphabetize attributes, fix conn init --- internal/service/finspace/kx_volume.go | 118 ++++++++++++------------- 1 file changed, 57 insertions(+), 61 deletions(-) diff --git a/internal/service/finspace/kx_volume.go b/internal/service/finspace/kx_volume.go index c76aa3b1f7b..c550a4ead2f 100644 --- a/internal/service/finspace/kx_volume.go +++ b/internal/service/finspace/kx_volume.go @@ -16,7 +16,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/enum" @@ -47,7 +46,36 @@ func ResourceKxVolume() *schema.Resource { }, Schema: map[string]*schema.Schema{ - + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "attached_clusters": { + Type: schema.TypeList, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cluster_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(3, 63), + }, + "cluster_status": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[types.KxClusterStatus](), + }, + "cluster_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[types.KxClusterType](), + }, + }, + }, + Computed: true, + }, "availability_zones": { Type: 
schema.TypeList, Elem: &schema.Schema{ @@ -56,39 +84,31 @@ func ResourceKxVolume() *schema.Resource { Required: true, ForceNew: true, }, - "arn": { - Type: schema.TypeString, - Computed: true, - }, "az_mode": { Type: schema.TypeString, Required: true, ForceNew: true, ValidateDiagFunc: enum.Validate[types.KxAzMode](), }, - "environment_id": { + "created_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "description": { Type: schema.TypeString, - Required: true, + Optional: true, ForceNew: true, - ValidateFunc: validation.StringLenBetween(1, 32), + ValidateFunc: validation.StringLenBetween(1, 1000), }, - "name": { + "environment_id": { Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validation.StringLenBetween(3, 63), - }, - "type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateDiagFunc: enum.Validate[types.KxVolumeType](), + ValidateFunc: validation.StringLenBetween(1, 32), }, - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validation.StringLenBetween(1, 1000), + "last_modified_timestamp": { + Type: schema.TypeString, + Computed: true, }, "nas1_configuration": { Type: schema.TypeList, @@ -111,13 +131,11 @@ func ResourceKxVolume() *schema.Resource { }, }, }, - "created_timestamp": { - Type: schema.TypeString, - Computed: true, - }, - "last_modified_timestamp": { - Type: schema.TypeString, - Computed: true, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(3, 63), }, "status": { Type: schema.TypeString, @@ -127,34 +145,14 @@ func ResourceKxVolume() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "attached_clusters": { - Type: schema.TypeList, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cluster_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.StringLenBetween(3, 63), - }, - "cluster_status": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateDiagFunc: enum.Validate[types.KxClusterStatus](), - }, - "cluster_type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateDiagFunc: enum.Validate[types.KxClusterType](), - }, - }, - }, - Computed: true, - }, names.AttrTags: tftags.TagsSchema(), names.AttrTagsAll: tftags.TagsSchemaComputed(), + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[types.KxVolumeType](), + }, }, CustomizeDiff: verify.SetTagsDiff, } @@ -167,7 +165,7 @@ const ( func resourceKxVolumeCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) + conn := meta.(*conns.AWSClient).FinSpaceClient(ctx) environmentId := d.Get("environment_id").(string) volumeName := d.Get("name").(string) @@ -199,8 +197,6 @@ func resourceKxVolumeCreate(ctx context.Context, d *schema.ResourceData, meta in in.Nas1Configuration = expandNas1Configuration(v.([]interface{})) } - // TODO: add flatten/expand functions for remaining parameters - out, err := conn.CreateKxVolume(ctx, in) if err != nil { return create.AppendDiagError(diags, names.FinSpace, create.ErrActionCreating, ResNameKxVolume, d.Get("name").(string), err) @@ -225,7 +221,7 @@ func resourceKxVolumeCreate(ctx context.Context, d *schema.ResourceData, meta in func resourceKxVolumeRead(ctx context.Context, d *schema.ResourceData, meta interface{}) 
diag.Diagnostics { var diags diag.Diagnostics - conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) + conn := meta.(*conns.AWSClient).FinSpaceClient(ctx) out, err := findKxVolumeByID(ctx, conn, d.Id()) @@ -270,7 +266,7 @@ func resourceKxVolumeRead(ctx context.Context, d *schema.ResourceData, meta inte func resourceKxVolumeUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) + conn := meta.(*conns.AWSClient).FinSpaceClient(ctx) updateVolume := false @@ -307,7 +303,7 @@ func resourceKxVolumeUpdate(ctx context.Context, d *schema.ResourceData, meta in func resourceKxVolumeDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) + conn := meta.(*conns.AWSClient).FinSpaceClient(ctx) log.Printf("[INFO] Deleting FinSpace Kx Volume: %s", d.Id()) _, err := conn.DeleteKxVolume(ctx, &finspace.DeleteKxVolumeInput{ From cecc3f2886187882451e0aaf9765354e6e6b20b1 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 09:41:28 -0500 Subject: [PATCH 041/123] r/aws_finspace_kx_volume(test): fix disappears test name --- internal/service/finspace/kx_volume_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/service/finspace/kx_volume_test.go b/internal/service/finspace/kx_volume_test.go index 52008f1e25e..84dade247bd 100644 --- a/internal/service/finspace/kx_volume_test.go +++ b/internal/service/finspace/kx_volume_test.go @@ -58,7 +58,7 @@ func TestAccFinSpaceKxVolume_basic(t *testing.T) { }) } -func TestAccFinSpaceKxVolume_dissappears(t *testing.T) { +func TestAccFinSpaceKxVolume_disappears(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } From fbb6fd67133021384ac24fba1b042514ca61ef5c Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 09:45:40 -0500 Subject: [PATCH 042/123] r/aws_finspace_kx_volume(test): use find in test check func --- internal/service/finspace/kx_volume.go | 6 +- internal/service/finspace/kx_volume_test.go | 66 +++++++++------------ 2 files changed, 32 insertions(+), 40 deletions(-) diff --git a/internal/service/finspace/kx_volume.go b/internal/service/finspace/kx_volume.go index c550a4ead2f..72bad740dcc 100644 --- a/internal/service/finspace/kx_volume.go +++ b/internal/service/finspace/kx_volume.go @@ -223,7 +223,7 @@ func resourceKxVolumeRead(ctx context.Context, d *schema.ResourceData, meta inte var diags diag.Diagnostics conn := meta.(*conns.AWSClient).FinSpaceClient(ctx) - out, err := findKxVolumeByID(ctx, conn, d.Id()) + out, err := FindKxVolumeByID(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] FinSpace KxVolume (%s) not found, removing from state", d.Id()) @@ -382,7 +382,7 @@ func waitKxVolumeDeleted(ctx context.Context, conn *finspace.Client, id string, func statusKxVolume(ctx context.Context, conn *finspace.Client, id string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - out, err := findKxVolumeByID(ctx, conn, id) + out, err := FindKxVolumeByID(ctx, conn, id) if tfresource.NotFound(err) { return nil, "", nil } @@ -395,7 +395,7 @@ func statusKxVolume(ctx context.Context, conn *finspace.Client, id string) retry } } -func findKxVolumeByID(ctx context.Context, conn *finspace.Client, id string) (*finspace.GetKxVolumeOutput, error) { +func 
FindKxVolumeByID(ctx context.Context, conn *finspace.Client, id string) (*finspace.GetKxVolumeOutput, error) { parts, err := flex.ExpandResourceId(id, kxVolumeIDPartCount, false) if err != nil { return nil, err diff --git a/internal/service/finspace/kx_volume_test.go b/internal/service/finspace/kx_volume_test.go index 84dade247bd..b1af01ddd14 100644 --- a/internal/service/finspace/kx_volume_test.go +++ b/internal/service/finspace/kx_volume_test.go @@ -9,7 +9,6 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/finspace" "github.com/aws/aws-sdk-go-v2/service/finspace/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" @@ -28,7 +27,7 @@ func TestAccFinSpaceKxVolume_basic(t *testing.T) { } ctx := acctest.Context(t) - var KxVolume finspace.GetKxVolumeOutput + var volume finspace.GetKxVolumeOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_finspace_kx_volume.test" @@ -44,7 +43,7 @@ func TestAccFinSpaceKxVolume_basic(t *testing.T) { { Config: testAccKxVolumeConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckKxVolumeExists(ctx, resourceName, &KxVolume), + testAccCheckKxVolumeExists(ctx, resourceName, &volume), resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "status", string(types.KxVolumeStatusActive)), ), @@ -64,7 +63,7 @@ func TestAccFinSpaceKxVolume_disappears(t *testing.T) { } ctx := acctest.Context(t) - var KxVolume finspace.GetKxVolumeOutput + var volume finspace.GetKxVolumeOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_finspace_kx_volume.test" @@ -80,7 +79,7 @@ func TestAccFinSpaceKxVolume_disappears(t *testing.T) { { Config: testAccKxVolumeConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckKxVolumeExists(ctx, resourceName, &KxVolume), + testAccCheckKxVolumeExists(ctx, resourceName, &volume), acctest.CheckResourceDisappears(ctx, acctest.Provider, tffinspace.ResourceKxVolume(), resourceName), ), ExpectNonEmptyPlan: true, @@ -98,11 +97,7 @@ func testAccCheckKxVolumeDestroy(ctx context.Context) resource.TestCheckFunc { continue } - input := &finspace.GetKxVolumeInput{ - VolumeName: aws.String(rs.Primary.Attributes["name"]), - EnvironmentId: aws.String(rs.Primary.Attributes["environment_id"]), - } - _, err := conn.GetKxVolume(ctx, input) + _, err := tffinspace.FindKxVolumeByID(ctx, conn, rs.Primary.ID) if err != nil { var nfe *types.ResourceNotFoundException if errors.As(err, &nfe) { @@ -118,6 +113,30 @@ func testAccCheckKxVolumeDestroy(ctx context.Context) resource.TestCheckFunc { } } +func testAccCheckKxVolumeExists(ctx context.Context, name string, volume *finspace.GetKxVolumeOutput) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxVolume, name, errors.New("not found")) + } + + if rs.Primary.ID == "" { + return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxVolume, name, errors.New("not set")) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) + + resp, err := tffinspace.FindKxVolumeByID(ctx, conn, rs.Primary.ID) + if err != nil { + return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxVolume, rs.Primary.ID, err) + } + + *volume = *resp + + return nil + } +} 
+ func testAccKxVolumeConfig_basic(rName string) string { return acctest.ConfigCompose( testAccKxVolumeConfigBase(rName), @@ -249,30 +268,3 @@ resource "aws_route" "r" { } `, rName) } - -func testAccCheckKxVolumeExists(ctx context.Context, name string, KxVolume *finspace.GetKxVolumeOutput) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] - if !ok { - return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxVolume, name, errors.New("not found")) - } - - if rs.Primary.ID == "" { - return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxVolume, name, errors.New("not set")) - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) - resp, err := conn.GetKxVolume(ctx, &finspace.GetKxVolumeInput{ - VolumeName: aws.String(rs.Primary.Attributes["name"]), - EnvironmentId: aws.String(rs.Primary.Attributes["environment_id"]), - }) - - if err != nil { - return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxVolume, rs.Primary.ID, err) - } - - *KxVolume = *resp - - return nil - } -} From 102a7f8f2952d300258214932eb5b02d25d4928e Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 09:46:41 -0500 Subject: [PATCH 043/123] chore: changelog --- .changelog/34832.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/34832.txt diff --git a/.changelog/34832.txt b/.changelog/34832.txt new file mode 100644 index 00000000000..0794acc706f --- /dev/null +++ b/.changelog/34832.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_finspace_kx_scaling_group +``` From 08fcc0b0f79b45dc1471fd9082b1b83deb8cf16c Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 09:47:28 -0500 Subject: [PATCH 044/123] chore: changelog --- .changelog/34833.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/34833.txt diff --git a/.changelog/34833.txt b/.changelog/34833.txt new file mode 100644 index 00000000000..e1e350824ea --- /dev/null +++ b/.changelog/34833.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_finspace_kx_volume +``` From 5a7600eda4cce5c6c3f5584c3fe26dbbc534424e Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 09:59:52 -0500 Subject: [PATCH 045/123] r/aws_finspace_kx_volume(test): fmt config --- internal/service/finspace/kx_volume_test.go | 126 ++++++++++---------- 1 file changed, 61 insertions(+), 65 deletions(-) diff --git a/internal/service/finspace/kx_volume_test.go b/internal/service/finspace/kx_volume_test.go index b1af01ddd14..616261e5097 100644 --- a/internal/service/finspace/kx_volume_test.go +++ b/internal/service/finspace/kx_volume_test.go @@ -137,32 +137,10 @@ func testAccCheckKxVolumeExists(ctx context.Context, name string, volume *finspa } } -func testAccKxVolumeConfig_basic(rName string) string { - return acctest.ConfigCompose( - testAccKxVolumeConfigBase(rName), - fmt.Sprintf(` -resource "aws_finspace_kx_volume" "test" { - name = %[1]q - environment_id = aws_finspace_kx_environment.test.id - availability_zones = [aws_finspace_kx_environment.test.availability_zones[0]] - az_mode = "SINGLE" - type = "NAS_1" - nas1_configuration { - type= "SSD_250" - size= 1200 - } -} -`, rName)) -} - func testAccKxVolumeConfigBase(rName string) string { return fmt.Sprintf(` data "aws_caller_identity" "current" {} data "aws_partition" "current" {} - -output "account_id" { - value = data.aws_caller_identity.current.account_id -} resource "aws_kms_key" 
"test" { deletion_window_in_days = 7 @@ -175,49 +153,49 @@ resource "aws_finspace_kx_environment" "test" { data "aws_iam_policy_document" "key_policy" { statement { - actions = [ - "kms:Decrypt", - "kms:GenerateDataKey" - ] - - resources = [ - aws_kms_key.test.arn, - ] - - principals { - type = "Service" - identifiers = ["finspace.amazonaws.com"] - } - - condition { - test = "ArnLike" - variable = "aws:SourceArn" - values = ["${aws_finspace_kx_environment.test.arn}/*"] - } - - condition { - test = "StringEquals" - variable = "aws:SourceAccount" - values = [data.aws_caller_identity.current.account_id] - } + actions = [ + "kms:Decrypt", + "kms:GenerateDataKey" + ] + + resources = [ + aws_kms_key.test.arn, + ] + + principals { + type = "Service" + identifiers = ["finspace.amazonaws.com"] + } + + condition { + test = "ArnLike" + variable = "aws:SourceArn" + values = ["${aws_finspace_kx_environment.test.arn}/*"] + } + + condition { + test = "StringEquals" + variable = "aws:SourceAccount" + values = [data.aws_caller_identity.current.account_id] + } } statement { - actions = [ - "kms:*", - ] - + actions = [ + "kms:*", + ] + resources = [ - "*", - ] - - principals { - type = "AWS" - identifiers = ["arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:root"] - } + "*", + ] + + principals { + type = "AWS" + identifiers = ["arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:root"] + } } } - + resource "aws_kms_key_policy" "test" { key_id = aws_kms_key.test.id policy = data.aws_iam_policy_document.key_policy.json @@ -240,16 +218,16 @@ resource "aws_security_group" "test" { ingress { from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] } egress { from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] } } @@ -268,3 +246,21 @@ resource "aws_route" "r" { } `, rName) } + +func testAccKxVolumeConfig_basic(rName string) string { + return acctest.ConfigCompose( + testAccKxVolumeConfigBase(rName), + fmt.Sprintf(` +resource "aws_finspace_kx_volume" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + availability_zones = [aws_finspace_kx_environment.test.availability_zones[0]] + az_mode = "SINGLE" + type = "NAS_1" + nas1_configuration { + type= "SSD_250" + size= 1200 + } +} +`, rName)) +} From 65bd94e709d92e7759709948a8d7043738510fb1 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 10:07:48 -0500 Subject: [PATCH 046/123] r/aws_finspace_kx_volume(doc): fmt config, tidy descriptions --- .../docs/r/finspace_kx_volume.html.markdown | 48 +++++++++---------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/website/docs/r/finspace_kx_volume.html.markdown b/website/docs/r/finspace_kx_volume.html.markdown index b573a81efdd..35e75bf3e32 100644 --- a/website/docs/r/finspace_kx_volume.html.markdown +++ b/website/docs/r/finspace_kx_volume.html.markdown @@ -16,15 +16,15 @@ Terraform resource for managing an AWS FinSpace Kx Volume. 
```terraform resource "aws_finspace_kx_volume" "example" { - name = "my-tf-kx-volume" - environment_id = aws_finspace_kx_environment.example.id - availability_zones = "use1-az2" - az_mode = "SINGLE" - type = "NAS_1" - nas1_configuration { - type= "SSD_250" - size= 1200 - } + name = "my-tf-kx-volume" + environment_id = aws_finspace_kx_environment.example.id + availability_zones = "use1-az2" + az_mode = "SINGLE" + type = "NAS_1" + nas1_configuration { + size= 1200 + type= "SSD_250" + } } ``` @@ -33,24 +33,24 @@ resource "aws_finspace_kx_volume" "example" { The following arguments are required: * `az_mode` - (Required) The number of availability zones you want to assign per volume. Currently, Finspace only support SINGLE for volumes. - * SINGLE - Assigns one availability zone per volume. + * `SINGLE` - Assigns one availability zone per volume. * `environment_id` - (Required) A unique identifier for the kdb environment, whose clusters can attach to the volume. * `name` - (Required) Unique name for the volumr that you want to create. -* `type` - (Required) The type of file system volume. Currently, FinSpace only supports NAS_1 volume type. When you select NAS_1 volume type, you must also provide nas1Configuration. +* `type` - (Required) The type of file system volume. Currently, FinSpace only supports the `NAS_1` volume type. When you select the `NAS_1` volume type, you must also provide `nas1_configuration`. * `availability_zones` - (Required) The identifier of the AWS Availability Zone IDs. The following arguments are optional: -* `nas1_configuration` - (Optional) Specifies the configuration for the Network attached storage (NAS_1) file system volume. This parameter is required when you choose volumeType as NAS_1. +* `nas1_configuration` - (Optional) Specifies the configuration for the Network attached storage (`NAS_1`) file system volume. This parameter is required when `volume_type` is `NAS_1`. See [`nas1_configuration` Argument Reference](#nas1_configuration-argument-reference) below. * `description` - (Optional) Description of the volume. * `tags` - (Optional) A list of key-value pairs to label the volume. You can add up to 50 tags to a volume -### nas1_configuration +### `nas1_configuration` Argument Reference -The nas1_configuration block supports the following arguments: +The `nas1_configuration` block supports the following arguments: * `size` - (Required) The size of the network attached storage. -* `security_group_ids` - (Required) The type of the network attached storage. +* `type` - (Required) The type of the network attached storage. ## Attribute Reference @@ -59,15 +59,15 @@ This resource exports the following attributes in addition to the arguments abov * `arn` - Amazon Resource Name (ARN) identifier of the KX volume. * `created_timestamp` - The timestamp at which the volume was created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000. * `status` - The status of volume creation. - * CREATING – The volume creation is in progress. - * CREATE_FAILED – The volume creation has failed. - * ACTIVE – The volume is active. - * UPDATING – The volume is in the process of being updated. - * UPDATE_FAILED – The update action failed. - * UPDATED – The volume is successfully updated. - * DELETING – The volume is in the process of being deleted. - * DELETE_FAILED – The system failed to delete the volume. - * DELETED – The volume is successfully deleted. 
+ * `CREATING` – The volume creation is in progress. + * `CREATE_FAILED` – The volume creation has failed. + * `ACTIVE` – The volume is active. + * `UPDATING` – The volume is in the process of being updated. + * `UPDATE_FAILED` – The update action failed. + * `UPDATED` – The volume is successfully updated. + * `DELETING` – The volume is in the process of being deleted. + * `DELETE_FAILED` – The system failed to delete the volume. + * `DELETED` – The volume is successfully deleted. * `status_reason` - The error message when a failed state occurs. * `last_modified_timestamp` - Last timestamp at which the volume was updated in FinSpace. Value determined as epoch time in seconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000. From 46e58e6438388bf02468c1d887190766160999af Mon Sep 17 00:00:00 2001 From: David Hwang Date: Thu, 14 Dec 2023 10:08:51 -0500 Subject: [PATCH 047/123] Update internal/service/finspace/kx_cluster_test.go Co-authored-by: Jared Baker --- internal/service/finspace/kx_cluster_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/service/finspace/kx_cluster_test.go b/internal/service/finspace/kx_cluster_test.go index 16c5b53d1d6..8ca4504aad7 100644 --- a/internal/service/finspace/kx_cluster_test.go +++ b/internal/service/finspace/kx_cluster_test.go @@ -672,7 +672,7 @@ func TestAccFinSpaceKxTPClusterInScalingGroup_withKxVolume(t *testing.T) { }) } -func TestAccFinSpaceKxClusterInScalingGroup_withKxDataview(t *testing.T) { +func TestAccFinSpaceKxCluster_InScalingGroupWithKxDataview(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } From e68fd67a607911841e9838dcf92b2ff9a4802c62 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 10:09:59 -0500 Subject: [PATCH 048/123] r/aws_finspace_kx_volume(doc): fmt config again --- website/docs/r/finspace_kx_volume.html.markdown | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/docs/r/finspace_kx_volume.html.markdown b/website/docs/r/finspace_kx_volume.html.markdown index 35e75bf3e32..0ddc66dc9e6 100644 --- a/website/docs/r/finspace_kx_volume.html.markdown +++ b/website/docs/r/finspace_kx_volume.html.markdown @@ -22,8 +22,8 @@ resource "aws_finspace_kx_volume" "example" { az_mode = "SINGLE" type = "NAS_1" nas1_configuration { - size= 1200 - type= "SSD_250" + size = 1200 + type = "SSD_250" } } ``` From a209c33f2582d1319efedb74420483db62006a96 Mon Sep 17 00:00:00 2001 From: David Hwang Date: Thu, 14 Dec 2023 10:10:21 -0500 Subject: [PATCH 049/123] Apply suggestions from code review Co-authored-by: Jared Baker --- internal/service/finspace/kx_cluster_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/internal/service/finspace/kx_cluster_test.go b/internal/service/finspace/kx_cluster_test.go index 8ca4504aad7..14451a2d9f1 100644 --- a/internal/service/finspace/kx_cluster_test.go +++ b/internal/service/finspace/kx_cluster_test.go @@ -32,7 +32,7 @@ func testAccPreCheckManagedKxLicenseEnabled(t *testing.T) { } } -func TestAccSKIPFinSpaceKxCluster_basic(t *testing.T) { +func TestAccFinSpaceKxCluster_basic(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -69,7 +69,7 @@ func TestAccSKIPFinSpaceKxCluster_basic(t *testing.T) { }) } -func TestAccSKIPFinSpaceKxCluster_disappears(t *testing.T) { +func TestAccFinSpaceKxCluster_disappears(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ 
-608,7 +608,7 @@ func TestAccFinSpaceKxCluster_ScalingGroup(t *testing.T) { }) } -func TestAccFinSpaceKxRDBClusterInScalingGroup_withKxVolume(t *testing.T) { +func TestAccFinSpaceKxCluster_InScalingGroupWithKxVolume(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -640,7 +640,7 @@ func TestAccFinSpaceKxRDBClusterInScalingGroup_withKxVolume(t *testing.T) { }) } -func TestAccFinSpaceKxTPClusterInScalingGroup_withKxVolume(t *testing.T) { +func TestAccFinSpaceKxCluster_InScalingGroupWithKxVolume(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } From 1ae3cedb7a2a8ced747b2c529cc12d3fda607d44 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 10:12:38 -0500 Subject: [PATCH 050/123] r/aws_finspace_kx_volume(test): fmt config again --- internal/service/finspace/kx_volume_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/service/finspace/kx_volume_test.go b/internal/service/finspace/kx_volume_test.go index 616261e5097..fa80b8b039a 100644 --- a/internal/service/finspace/kx_volume_test.go +++ b/internal/service/finspace/kx_volume_test.go @@ -258,8 +258,8 @@ resource "aws_finspace_kx_volume" "test" { az_mode = "SINGLE" type = "NAS_1" nas1_configuration { - type= "SSD_250" - size= 1200 + type = "SSD_250" + size = 1200 } } `, rName)) From 31cb4334856df230e0a317a3237e92cc9f9927e1 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 10:20:51 -0500 Subject: [PATCH 051/123] r/aws_finspace_kx_dataview: add headers --- internal/service/finspace/kx_dataview_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/internal/service/finspace/kx_dataview_test.go b/internal/service/finspace/kx_dataview_test.go index 0a26282997d..9bce6348592 100644 --- a/internal/service/finspace/kx_dataview_test.go +++ b/internal/service/finspace/kx_dataview_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package finspace_test import ( From 0834c20027077a46eb6939e7269d48668b31d9dc Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 10:22:25 -0500 Subject: [PATCH 052/123] r/aws_finspace_kx_dataview: prefer create.AppendDiagError --- internal/service/finspace/kx_dataview.go | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/internal/service/finspace/kx_dataview.go b/internal/service/finspace/kx_dataview.go index b18aaa8f7e8..09bc349afb7 100644 --- a/internal/service/finspace/kx_dataview.go +++ b/internal/service/finspace/kx_dataview.go @@ -148,7 +148,7 @@ func resourceKxDataviewCreate(ctx context.Context, d *schema.ResourceData, meta rId, err := flex.FlattenResourceId(idParts, kxDataviewIdPartCount, false) if err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionFlatteningResourceId, ResNameKxDataview, d.Get("name").(string), err)...) 
+ return create.AppendDiagError(diags, names.FinSpace, create.ErrActionFlatteningResourceId, ResNameKxDataview, d.Get("name").(string), err) } d.SetId(rId) @@ -156,7 +156,7 @@ func resourceKxDataviewCreate(ctx context.Context, d *schema.ResourceData, meta DatabaseName: aws.String(d.Get("database_name").(string)), DataviewName: aws.String(d.Get("name").(string)), EnvironmentId: aws.String(d.Get("environment_id").(string)), - AutoUpdate: *aws.Bool(d.Get("auto_update").(bool)), + AutoUpdate: d.Get("auto_update").(bool), AzMode: types.KxAzMode(d.Get("az_mode").(string)), ClientToken: aws.String(id.UniqueId()), Tags: getTagsIn(ctx), @@ -180,13 +180,13 @@ func resourceKxDataviewCreate(ctx context.Context, d *schema.ResourceData, meta out, err := conn.CreateKxDataview(ctx, in) if err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionCreating, ResNameKxDataview, d.Get("name").(string), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionCreating, ResNameKxDataview, d.Get("name").(string), err) } if out == nil || out.DataviewName == nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionCreating, ResNameKxDataview, d.Get("name").(string), errors.New("empty output"))...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionCreating, ResNameKxDataview, d.Get("name").(string), errors.New("empty output")) } if _, err := waitKxDataviewCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionWaitingForCreation, ResNameKxDataview, d.Get("name").(string), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionWaitingForCreation, ResNameKxDataview, d.Get("name").(string), err) } return append(diags, resourceKxDataviewRead(ctx, d, meta)...) @@ -204,7 +204,7 @@ func resourceKxDataviewRead(ctx context.Context, d *schema.ResourceData, meta in } if err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionReading, ResNameKxDataview, d.Id(), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionReading, ResNameKxDataview, d.Id(), err) } d.Set("name", out.DataviewName) d.Set("description", out.Description) @@ -218,7 +218,7 @@ func resourceKxDataviewRead(ctx context.Context, d *schema.ResourceData, meta in d.Set("environment_id", out.EnvironmentId) d.Set("az_mode", out.AzMode) if err := d.Set("segment_configurations", flattenSegmentConfigurations(out.SegmentConfigurations)); err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionReading, ResNameKxDataview, d.Id(), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionReading, ResNameKxDataview, d.Id(), err) } return diags @@ -243,11 +243,11 @@ func resourceKxDataviewUpdate(ctx context.Context, d *schema.ResourceData, meta } if _, err := conn.UpdateKxDataview(ctx, in); err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionUpdating, ResNameKxDataview, d.Get("name").(string), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionUpdating, ResNameKxDataview, d.Get("name").(string), err) } if _, err := waitKxDataviewUpdated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionWaitingForUpdate, ResNameKxDataview, d.Get("name").(string), err)...) 
+ return create.AppendDiagError(diags, names.FinSpace, create.ErrActionWaitingForUpdate, ResNameKxDataview, d.Get("name").(string), err) } return append(diags, resourceKxDataviewRead(ctx, d, meta)...) @@ -269,11 +269,11 @@ func resourceKxDataviewDelete(ctx context.Context, d *schema.ResourceData, meta if errors.As(err, &nfe) { return diags } - return append(diags, create.DiagError(names.FinSpace, create.ErrActionDeleting, ResNameKxDataview, d.Get("name").(string), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionDeleting, ResNameKxDataview, d.Get("name").(string), err) } if _, err := waitKxDataviewDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil && !tfresource.NotFound(err) { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionWaitingForDeletion, ResNameKxDataview, d.Id(), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionWaitingForDeletion, ResNameKxDataview, d.Id(), err) } return diags } From ef5a2d87841c6583daa9963eeb3c352df901978b Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 10:24:22 -0500 Subject: [PATCH 053/123] r/aws_finspace_kx_dataview: alphabetize attributes --- internal/service/finspace/kx_dataview.go | 74 ++++++++++++------------ 1 file changed, 38 insertions(+), 36 deletions(-) diff --git a/internal/service/finspace/kx_dataview.go b/internal/service/finspace/kx_dataview.go index 09bc349afb7..add5b00bc2e 100644 --- a/internal/service/finspace/kx_dataview.go +++ b/internal/service/finspace/kx_dataview.go @@ -6,12 +6,16 @@ package finspace import ( "context" "errors" + "log" + "time" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/finspace" "github.com/aws/aws-sdk-go-v2/service/finspace/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/flex" @@ -19,8 +23,6 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" - "log" - "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -50,36 +52,14 @@ func ResourceKxDataview() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.StringLenBetween(3, 63), - }, - "environment_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.StringLenBetween(3, 63), - }, - "database_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.StringLenBetween(3, 63), - }, - "description": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringLenBetween(1, 1000), - }, "auto_update": { Type: schema.TypeBool, ForceNew: true, Required: true, }, - "changeset_id": { + "availability_zone_id": { Type: schema.TypeString, + ForceNew: true, Optional: true, }, "az_mode": { @@ -88,11 +68,41 @@ func ResourceKxDataview() *schema.Resource { ForceNew: true, ValidateDiagFunc: 
enum.Validate[types.KxAzMode](), }, - "availability_zone_id": { + "changeset_id": { Type: schema.TypeString, - ForceNew: true, Optional: true, }, + "created_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "database_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(3, 63), + }, + "description": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 1000), + }, + "environment_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(3, 63), + }, + "last_modified_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(3, 63), + }, "segment_configurations": { Type: schema.TypeList, Elem: &schema.Resource{ @@ -116,14 +126,6 @@ func ResourceKxDataview() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "created_timestamp": { - Type: schema.TypeString, - Computed: true, - }, - "last_modified_timestamp": { - Type: schema.TypeString, - Computed: true, - }, names.AttrTags: tftags.TagsSchema(), names.AttrTagsAll: tftags.TagsSchemaComputed(), }, From bfdec1686b0c8b3ace004054682417e016e2bc4f Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 10:25:04 -0500 Subject: [PATCH 054/123] chore: changelog --- .changelog/34828.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/34828.txt diff --git a/.changelog/34828.txt b/.changelog/34828.txt new file mode 100644 index 00000000000..cfa7c1c5289 --- /dev/null +++ b/.changelog/34828.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_finspace_kx_dataview +``` From cf484a231ec772f65d032f55413505782b6c8459 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 10:35:13 -0500 Subject: [PATCH 055/123] r/aws_finspace_kx_volume: nolintlint --- internal/service/finspace/kx_volume.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/service/finspace/kx_volume.go b/internal/service/finspace/kx_volume.go index 72bad740dcc..0446eb223ce 100644 --- a/internal/service/finspace/kx_volume.go +++ b/internal/service/finspace/kx_volume.go @@ -346,7 +346,7 @@ func waitKxVolumeCreated(ctx context.Context, conn *finspace.Client, id string, return nil, err } -func waitKxVolumeUpdated(ctx context.Context, conn *finspace.Client, id string, timeout time.Duration) (*finspace.GetKxVolumeOutput, error) { //nolint:unparam +func waitKxVolumeUpdated(ctx context.Context, conn *finspace.Client, id string, timeout time.Duration) (*finspace.GetKxVolumeOutput, error) { stateConf := &retry.StateChangeConf{ Pending: enum.Slice(types.KxVolumeStatusCreating, types.KxVolumeStatusUpdating), Target: enum.Slice(types.KxVolumeStatusActive), From 6d8e014383f2136be2041c4c4f7a56e45e39d477 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 10:41:32 -0500 Subject: [PATCH 056/123] r/aws_finspace_kx_dataview(test): reorganize --- internal/service/finspace/kx_dataview_test.go | 152 +++++++++--------- 1 file changed, 73 insertions(+), 79 deletions(-) diff --git a/internal/service/finspace/kx_dataview_test.go b/internal/service/finspace/kx_dataview_test.go index 9bce6348592..1410cd96274 100644 --- a/internal/service/finspace/kx_dataview_test.go +++ b/internal/service/finspace/kx_dataview_test.go @@ -7,6 +7,8 @@ import ( "context" "errors" "fmt" + "testing" + "github.com/aws/aws-sdk-go-v2/aws" 
"github.com/aws/aws-sdk-go-v2/service/finspace" "github.com/aws/aws-sdk-go-v2/service/finspace/types" @@ -14,10 +16,10 @@ import ( "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" tffinspace "github.com/hashicorp/terraform-provider-aws/internal/service/finspace" "github.com/hashicorp/terraform-provider-aws/names" - "testing" ) func TestAccFinSpaceKxDataview_basic(t *testing.T) { @@ -26,7 +28,7 @@ func TestAccFinSpaceKxDataview_basic(t *testing.T) { } ctx := acctest.Context(t) - var kxdataview finspace.GetKxDataviewOutput + var dataview finspace.GetKxDataviewOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_finspace_kx_dataview.test" @@ -42,7 +44,7 @@ func TestAccFinSpaceKxDataview_basic(t *testing.T) { { Config: testAccKxDataviewConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckKxDataviewExists(ctx, resourceName, &kxdataview), + testAccCheckKxDataviewExists(ctx, resourceName, &dataview), resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "status", string(types.KxDataviewStatusActive)), ), @@ -61,7 +63,7 @@ func TestAccFinSpaceKxDataview_disappears(t *testing.T) { } ctx := acctest.Context(t) - var kxdataview finspace.GetKxDataviewOutput + var dataview finspace.GetKxDataviewOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_finspace_kx_dataview.test" @@ -77,7 +79,7 @@ func TestAccFinSpaceKxDataview_disappears(t *testing.T) { { Config: testAccKxDataviewConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckKxDataviewExists(ctx, resourceName, &kxdataview), + testAccCheckKxDataviewExists(ctx, resourceName, &dataview), acctest.CheckResourceDisappears(ctx, acctest.Provider, tffinspace.ResourceKxDataview(), resourceName), ), ExpectNonEmptyPlan: true, @@ -86,38 +88,38 @@ func TestAccFinSpaceKxDataview_disappears(t *testing.T) { }) } -func testAccKxDataviewConfigBase(rName string) string { - return fmt.Sprintf(` -resource "aws_kms_key" "test" { - deletion_window_in_days = 7 -} +func TestAccFinSpaceKxDataview_withKxVolume(t *testing.T) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } -resource "aws_finspace_kx_environment" "test" { - name = %[1]q - kms_key_id = aws_kms_key.test.arn -} -resource "aws_finspace_kx_database" "test" { - name = %[1]q - environment_id = aws_finspace_kx_environment.test.id -} -`, rName) -} -func testAccKxDataviewConfig_basic(rName string) string { - return acctest.ConfigCompose( - testAccKxDataviewConfigBase(rName), - fmt.Sprintf(` -resource "aws_finspace_kx_dataview" "test" { - name = %[1]q - environment_id = aws_finspace_kx_environment.test.id - database_name = aws_finspace_kx_database.test.name - auto_update = true - az_mode = "SINGLE" - availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] -} -`, rName)) + ctx := acctest.Context(t) + var dataview finspace.GetKxDataviewOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_finspace_kx_dataview.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, finspace.ServiceID) + }, + ErrorCheck: 
acctest.ErrorCheck(t, finspace.ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckKxDataviewDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccKxDataviewConfig_withKxVolume(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckKxDataviewExists(ctx, resourceName, &dataview), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "status", string(types.KxDataviewStatusActive)), + ), + }, + }, + }) } -func testAccCheckKxDataviewExists(ctx context.Context, name string, kxdataview *finspace.GetKxDataviewOutput) resource.TestCheckFunc { +func testAccCheckKxDataviewExists(ctx context.Context, name string, dataview *finspace.GetKxDataviewOutput) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[name] if !ok { @@ -138,7 +140,7 @@ func testAccCheckKxDataviewExists(ctx context.Context, name string, kxdataview * return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxDataview, rs.Primary.ID, err) } - *kxdataview = *resp + *dataview = *resp return nil } @@ -170,8 +172,43 @@ func testAccCheckKxDataviewDestroy(ctx context.Context) resource.TestCheckFunc { } } -func testAccKxDataviewVolumeBase(rName string) string { +func testAccKxDataviewConfigBase(rName string) string { return fmt.Sprintf(` +resource "aws_kms_key" "test" { + deletion_window_in_days = 7 +} + +resource "aws_finspace_kx_environment" "test" { + name = %[1]q + kms_key_id = aws_kms_key.test.arn +} + +resource "aws_finspace_kx_database" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id +} +`, rName) +} + +func testAccKxDataviewConfig_basic(rName string) string { + return acctest.ConfigCompose( + testAccKxDataviewConfigBase(rName), + fmt.Sprintf(` +resource "aws_finspace_kx_dataview" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + database_name = aws_finspace_kx_database.test.name + auto_update = true + az_mode = "SINGLE" + availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] +} +`, rName)) +} + +func testAccKxDataviewConfig_withKxVolume(rName string) string { + return acctest.ConfigCompose( + testAccKxDataviewConfigBase(rName), + fmt.Sprintf(` resource "aws_finspace_kx_volume" "test" { name = %[1]q environment_id = aws_finspace_kx_environment.test.id @@ -183,14 +220,7 @@ resource "aws_finspace_kx_volume" "test" { size= 1200 } } -`, rName) -} -func testAccKxDataviewConfig_withKxVolume(rName string) string { - return acctest.ConfigCompose( - testAccKxDataviewConfigBase(rName), - testAccKxDataviewVolumeBase(rName), - fmt.Sprintf(` resource "aws_finspace_kx_dataview" "test" { name = %[1]q environment_id = aws_finspace_kx_environment.test.id @@ -206,39 +236,3 @@ resource "aws_finspace_kx_dataview" "test" { } `, rName)) } - -func TestAccFinSpaceKxDataview_withKxVolume(t *testing.T) { - if testing.Short() { - t.Skip("skipping long-running test in short mode") - } - ctx := acctest.Context(t) - - var kxdataview finspace.GetKxDataviewOutput - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_finspace_kx_dataview.test" - - resource.ParallelTest(t, resource.TestCase{ - - PreCheck: func() { - acctest.PreCheck(ctx, t) - acctest.PreCheckPartitionHasService(t, finspace.ServiceID) - }, - ErrorCheck: acctest.ErrorCheck(t, finspace.ServiceID), - - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - - CheckDestroy: 
testAccCheckKxDataviewDestroy(ctx), - - Steps: []resource.TestStep{ - { - Config: testAccKxDataviewConfig_withKxVolume(rName), - - Check: resource.ComposeTestCheckFunc( - testAccCheckKxDataviewExists(ctx, resourceName, &kxdataview), - resource.TestCheckResourceAttr(resourceName, "name", rName), - resource.TestCheckResourceAttr(resourceName, "status", string(types.KxDataviewStatusActive)), - ), - }, - }, - }) -} From db3b97354cb929c19ef49120650ae10cf53dde0c Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 10:44:25 -0500 Subject: [PATCH 057/123] r/aws_finspace_kx_dataview(test): prefer finder in test check func --- internal/service/finspace/kx_dataview.go | 6 +++--- internal/service/finspace/kx_dataview_test.go | 20 ++++++++----------- 2 files changed, 11 insertions(+), 15 deletions(-) diff --git a/internal/service/finspace/kx_dataview.go b/internal/service/finspace/kx_dataview.go index add5b00bc2e..a269c6b1172 100644 --- a/internal/service/finspace/kx_dataview.go +++ b/internal/service/finspace/kx_dataview.go @@ -198,7 +198,7 @@ func resourceKxDataviewRead(ctx context.Context, d *schema.ResourceData, meta in var diags diag.Diagnostics conn := meta.(*conns.AWSClient).FinSpaceClient(ctx) - out, err := findKxDataviewById(ctx, conn, d.Id()) + out, err := FindKxDataviewById(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] FinSpace KxDataview (%s) not found, removing from state", d.Id()) d.SetId("") @@ -280,7 +280,7 @@ func resourceKxDataviewDelete(ctx context.Context, d *schema.ResourceData, meta return diags } -func findKxDataviewById(ctx context.Context, conn *finspace.Client, id string) (*finspace.GetKxDataviewOutput, error) { +func FindKxDataviewById(ctx context.Context, conn *finspace.Client, id string) (*finspace.GetKxDataviewOutput, error) { idParts, err := flex.ExpandResourceId(id, kxDataviewIdPartCount, false) if err != nil { return nil, err @@ -364,7 +364,7 @@ func waitKxDataviewDeleted(ctx context.Context, conn *finspace.Client, id string func statusKxDataview(ctx context.Context, conn *finspace.Client, id string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - out, err := findKxDataviewById(ctx, conn, id) + out, err := FindKxDataviewById(ctx, conn, id) if tfresource.NotFound(err) { return nil, "", nil } diff --git a/internal/service/finspace/kx_dataview_test.go b/internal/service/finspace/kx_dataview_test.go index 1410cd96274..9f1d7fa9862 100644 --- a/internal/service/finspace/kx_dataview_test.go +++ b/internal/service/finspace/kx_dataview_test.go @@ -9,7 +9,6 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/finspace" "github.com/aws/aws-sdk-go-v2/service/finspace/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" @@ -57,6 +56,7 @@ func TestAccFinSpaceKxDataview_basic(t *testing.T) { }, }) } + func TestAccFinSpaceKxDataview_disappears(t *testing.T) { if testing.Short() { t.Skip("Skipping test in short mode.") @@ -131,11 +131,8 @@ func testAccCheckKxDataviewExists(ctx context.Context, name string, dataview *fi } conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) - resp, err := conn.GetKxDataview(ctx, &finspace.GetKxDataviewInput{ - DatabaseName: aws.String(rs.Primary.Attributes["database_name"]), - EnvironmentId: aws.String(rs.Primary.Attributes["environment_id"]), - DataviewName: aws.String(rs.Primary.Attributes["name"]), - }) + + resp, err := tffinspace.FindKxDataviewById(ctx, conn, 
rs.Primary.ID) if err != nil { return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxDataview, rs.Primary.ID, err) } @@ -154,11 +151,8 @@ func testAccCheckKxDataviewDestroy(ctx context.Context) resource.TestCheckFunc { } conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) - _, err := conn.GetKxDataview(ctx, &finspace.GetKxDataviewInput{ - DatabaseName: aws.String(rs.Primary.Attributes["database_name"]), - EnvironmentId: aws.String(rs.Primary.Attributes["environment_id"]), - DataviewName: aws.String(rs.Primary.Attributes["name"]), - }) + + _, err := tffinspace.FindKxDataviewById(ctx, conn, rs.Primary.ID) if err != nil { var nfe *types.ResourceNotFoundException if errors.As(err, &nfe) { @@ -166,8 +160,10 @@ func testAccCheckKxDataviewDestroy(ctx context.Context) resource.TestCheckFunc { } return err } - return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxDataview, rs.Primary.ID, err) + + return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxDataview, rs.Primary.ID, errors.New("not destroyed")) } + return nil } } From b0ca6929c144d4464e6b675d68509a898a3c036b Mon Sep 17 00:00:00 2001 From: Mayank Hirani Date: Thu, 14 Dec 2023 11:28:02 -0500 Subject: [PATCH 058/123] Fix acceptance test linting. --- internal/service/finspace/kx_dataview_test.go | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/internal/service/finspace/kx_dataview_test.go b/internal/service/finspace/kx_dataview_test.go index 9f1d7fa9862..cfe02b985c5 100644 --- a/internal/service/finspace/kx_dataview_test.go +++ b/internal/service/finspace/kx_dataview_test.go @@ -175,8 +175,8 @@ resource "aws_kms_key" "test" { } resource "aws_finspace_kx_environment" "test" { - name = %[1]q - kms_key_id = aws_kms_key.test.arn + name = %[1]q + kms_key_id = aws_kms_key.test.arn } resource "aws_finspace_kx_database" "test" { @@ -206,14 +206,14 @@ func testAccKxDataviewConfig_withKxVolume(rName string) string { testAccKxDataviewConfigBase(rName), fmt.Sprintf(` resource "aws_finspace_kx_volume" "test" { - name = %[1]q - environment_id = aws_finspace_kx_environment.test.id + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id availability_zones = [aws_finspace_kx_environment.test.availability_zones[0]] - az_mode = "SINGLE" - type = "NAS_1" + az_mode = "SINGLE" + type = "NAS_1" nas1_configuration { - type= "SSD_250" - size= 1200 + type= "SSD_250" + size= 1200 } } @@ -226,8 +226,8 @@ resource "aws_finspace_kx_dataview" "test" { availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] segment_configurations { - db_paths = ["/*"] - volume_name = aws_finspace_kx_volume.test.name + db_paths = ["/*"] + volume_name = aws_finspace_kx_volume.test.name } } `, rName)) From 3836f546c5fc78bfe85c611a2baa347cd12ccf9a Mon Sep 17 00:00:00 2001 From: David Hwang Date: Thu, 14 Dec 2023 11:55:44 -0500 Subject: [PATCH 059/123] Fix linting, rename tests --- internal/service/finspace/kx_cluster_test.go | 74 ++++++++++---------- 1 file changed, 37 insertions(+), 37 deletions(-) diff --git a/internal/service/finspace/kx_cluster_test.go b/internal/service/finspace/kx_cluster_test.go index 14451a2d9f1..9d69793554a 100644 --- a/internal/service/finspace/kx_cluster_test.go +++ b/internal/service/finspace/kx_cluster_test.go @@ -608,7 +608,7 @@ func TestAccFinSpaceKxCluster_ScalingGroup(t *testing.T) { }) } -func TestAccFinSpaceKxCluster_InScalingGroupWithKxVolume(t *testing.T) { 
+func TestAccFinSpaceKxCluster_RDBInScalingGroupWithKxVolume(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -640,7 +640,7 @@ func TestAccFinSpaceKxCluster_InScalingGroupWithKxVolume(t *testing.T) { }) } -func TestAccFinSpaceKxCluster_InScalingGroupWithKxVolume(t *testing.T) { +func TestAccFinSpaceKxCluster_TPInScalingGroupWithKxVolume(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -876,36 +876,36 @@ resource "aws_route" "r" { func testAccKxClusterConfigScalingGroupBase(rName string) string { return fmt.Sprintf(` - resource "aws_finspace_kx_scaling_group" "test" { - name = %[1]q - environment_id = aws_finspace_kx_environment.test.id - availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] - host_type = "kx.sg.4xlarge" - } +resource "aws_finspace_kx_scaling_group" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] + host_type = "kx.sg.4xlarge" +} `, rName) } func testAccKxClusterConfigKxVolumeBase(rName string) string { return fmt.Sprintf(` - resource "aws_finspace_kx_volume" "test" { - name = %[1]q - environment_id = aws_finspace_kx_environment.test.id - availability_zones = [aws_finspace_kx_environment.test.availability_zones[0]] - az_mode = "SINGLE" - type = "NAS_1" - nas1_configuration { - type= "SSD_1000" - size= 1200 - } - } +resource "aws_finspace_kx_volume" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + availability_zones = [aws_finspace_kx_environment.test.availability_zones[0]] + az_mode = "SINGLE" + type = "NAS_1" + nas1_configuration { + type = "SSD_1000" + size = 1200 + } +} `, rName) } func testAccKxClusterConfigKxDataviewBase(rName string) string { return fmt.Sprintf(` resource "aws_finspace_kx_database" "test" { - name = %[1]q - environment_id = aws_finspace_kx_environment.test.id + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id } resource "aws_finspace_kx_dataview" "test" { @@ -964,10 +964,10 @@ resource "aws_finspace_kx_cluster" "test" { } scaling_group_configuration { scaling_group_name = aws_finspace_kx_scaling_group.test.name - memory_limit = 200 + memory_limit = 200 memory_reservation = 100 - node_count = 1 - cpu = 0.5 + node_count = 1 + cpu = 0.5 } } `, rName)) @@ -980,9 +980,9 @@ func testAccKxRDBClusterConfigInScalingGroup_withKxVolume(rName string) string { testAccKxClusterConfigScalingGroupBase(rName), fmt.Sprintf(` resource "aws_finspace_kx_database" "test" { - name = %[1]q - environment_id = aws_finspace_kx_environment.test.id - } + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id +} resource "aws_finspace_kx_cluster" "test" { name = %[1]q @@ -999,10 +999,10 @@ resource "aws_finspace_kx_cluster" "test" { } scaling_group_configuration { scaling_group_name = aws_finspace_kx_scaling_group.test.name - memory_limit = 200 + memory_limit = 200 memory_reservation = 100 - node_count = 1 - cpu = 0.5 + node_count = 1 + cpu = 0.5 } database { database_name = aws_finspace_kx_database.test.name @@ -1035,10 +1035,10 @@ resource "aws_finspace_kx_cluster" "test" { } scaling_group_configuration { scaling_group_name = aws_finspace_kx_scaling_group.test.name - memory_limit = 200 + memory_limit = 200 memory_reservation = 100 - node_count = 1 - cpu = 0.5 + node_count = 1 + cpu = 0.5 } tickerplant_log_configuration { tickerplant_log_volumes = [aws_finspace_kx_volume.test.name] @@ -1069,15 
+1069,15 @@ resource "aws_finspace_kx_cluster" "test" { scaling_group_configuration { scaling_group_name = aws_finspace_kx_scaling_group.test.name - memory_limit = 200 + memory_limit = 200 memory_reservation = 100 - node_count = 1 - cpu = 0.5 + node_count = 1 + cpu = 0.5 } database { database_name = aws_finspace_kx_database.test.name - dataview_name = aws_finspace_kx_dataview.test.name + dataview_name = aws_finspace_kx_dataview.test.name } lifecycle { From 583c90e6da85236ea8e9280600e9b5dd190c2680 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 12:39:58 -0500 Subject: [PATCH 060/123] r/aws_finspace_kx_dataview(test): fmt config --- internal/service/finspace/kx_dataview_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/internal/service/finspace/kx_dataview_test.go b/internal/service/finspace/kx_dataview_test.go index cfe02b985c5..88096f6bc92 100644 --- a/internal/service/finspace/kx_dataview_test.go +++ b/internal/service/finspace/kx_dataview_test.go @@ -212,8 +212,8 @@ resource "aws_finspace_kx_volume" "test" { az_mode = "SINGLE" type = "NAS_1" nas1_configuration { - type= "SSD_250" - size= 1200 + size = 1200 + type = "SSD_250" } } @@ -227,7 +227,7 @@ resource "aws_finspace_kx_dataview" "test" { segment_configurations { db_paths = ["/*"] - volume_name = aws_finspace_kx_volume.test.name + volume_name = aws_finspace_kx_volume.test.name } } `, rName)) From 6c9ce2ae31858e5a514e62f438818b6d3953c62e Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 12:42:02 -0500 Subject: [PATCH 061/123] r/aws_finspace_kx_dataview(doc): fmt config, tidy descriptions --- website/docs/r/finspace_kx_dataview.html.markdown | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/website/docs/r/finspace_kx_dataview.html.markdown b/website/docs/r/finspace_kx_dataview.html.markdown index 70a2b762c90..7816b6651d0 100644 --- a/website/docs/r/finspace_kx_dataview.html.markdown +++ b/website/docs/r/finspace_kx_dataview.html.markdown @@ -23,7 +23,7 @@ resource "aws_finspace_kx_dataview" "example" { description = "Terraform managed Kx Dataview" az_mode = "SINGLE" auto_update = true - + segment_configurations { volume_name = aws_finspace_kx_volume.example.name db_paths = ["/*"] @@ -36,25 +36,26 @@ resource "aws_finspace_kx_dataview" "example" { The following arguments are required: * `az_mode` - (Required) The number of availability zones you want to assign per cluster. This can be one of the following: - * SINGLE - Assigns one availability zone per cluster. - * MULTI - Assigns all the availability zones per cluster. + * `SINGLE` - Assigns one availability zone per cluster. + * `MULTI` - Assigns all the availability zones per cluster. * `database_name` - (Required) The name of the database where you want to create a dataview. * `environment_id` - (Required) Unique identifier for the KX environment. * `name` - (Required) A unique identifier for the dataview. The following arguments are optional: + * `auto_update` - (Optional) The option to specify whether you want to apply all the future additions and corrections automatically to the dataview, when you ingest new changesets. The default value is false. * `availability_zone_id` - (Optional) The identifier of the availability zones. If attaching a volume, the volume must be in the same availability zone as the dataview that you are attaching to. * `changeset_id` - (Optional) A unique identifier of the changeset of the database that you want to use to ingest data. 
* `description` - (Optional) A description for the dataview. -* `segment_configurations` - (Optional) The configuration that contains the database path of the data that you want to place on each selected volume. Each segment must have a unique database path for each volume. If you do not explicitly specify any database path for a volume, they are accessible from the cluster through the default S3/object store segment. See [segment_configurations](#segment_configurations). +* `segment_configurations` - (Optional) The configuration that contains the database path of the data that you want to place on each selected volume. Each segment must have a unique database path for each volume. If you do not explicitly specify any database path for a volume, they are accessible from the cluster through the default S3/object store segment. See [segment_configurations](#segment_configurations-argument-reference) below. * `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -### segment_configurations +### `segment_configurations` Argument Reference + * `db_paths` - (Required) The database path of the data that you want to place on each selected volume. Each segment must have a unique database path for each volume. * `volume_name` - (Required) The name of the volume that you want to attach to a dataview. This volume must be in the same availability zone as the dataview that you are attaching to. - ## Attribute Reference This resource exports the following attributes in addition to the arguments above: From 568d51a317508d1aefa5b9a9bf89eef63cc4adb7 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 12:49:52 -0500 Subject: [PATCH 062/123] r/aws_finspace_kx_dataview(doc): terrafmt, markdownlint fixes --- website/docs/r/finspace_kx_dataview.html.markdown | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/website/docs/r/finspace_kx_dataview.html.markdown b/website/docs/r/finspace_kx_dataview.html.markdown index 7816b6651d0..ad2d6b51049 100644 --- a/website/docs/r/finspace_kx_dataview.html.markdown +++ b/website/docs/r/finspace_kx_dataview.html.markdown @@ -24,7 +24,7 @@ resource "aws_finspace_kx_dataview" "example" { az_mode = "SINGLE" auto_update = true - segment_configurations { + segment_configurations { volume_name = aws_finspace_kx_volume.example.name db_paths = ["/*"] } @@ -36,9 +36,9 @@ resource "aws_finspace_kx_dataview" "example" { The following arguments are required: * `az_mode` - (Required) The number of availability zones you want to assign per cluster. This can be one of the following: - * `SINGLE` - Assigns one availability zone per cluster. - * `MULTI` - Assigns all the availability zones per cluster. -* `database_name` - (Required) The name of the database where you want to create a dataview. + * `SINGLE` - Assigns one availability zone per cluster. + * `MULTI` - Assigns all the availability zones per cluster. +* `database_name` - (Required) The name of the database where you want to create a dataview. * `environment_id` - (Required) Unique identifier for the KX environment. * `name` - (Required) A unique identifier for the dataview. @@ -63,7 +63,7 @@ This resource exports the following attributes in addition to the arguments abov * `arn` - Amazon Resource Name (ARN) identifier of the KX dataview. 
* `created_timestamp` - Timestamp at which the dataview was created in FinSpace. Value determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000. * `id` - A comma-delimited string joining environment ID, database name and dataview name. -* `last_modified_timestamp` - The last time that the dataview was updated in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000. +* `last_modified_timestamp` - The last time that the dataview was updated in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000. * `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block). ## Timeouts From f63ea254fda343a4612f96487bb0075682e0b470 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 12:51:50 -0500 Subject: [PATCH 063/123] r/aws_finspace_kx_dataview(test): terrafmt --- internal/service/finspace/kx_dataview_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/service/finspace/kx_dataview_test.go b/internal/service/finspace/kx_dataview_test.go index 88096f6bc92..0dda532a630 100644 --- a/internal/service/finspace/kx_dataview_test.go +++ b/internal/service/finspace/kx_dataview_test.go @@ -226,7 +226,7 @@ resource "aws_finspace_kx_dataview" "test" { availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] segment_configurations { - db_paths = ["/*"] + db_paths = ["/*"] volume_name = aws_finspace_kx_volume.test.name } } From 0e0b6dded79c4ae46fd8710e2dcd902508c11ba8 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 12:55:18 -0500 Subject: [PATCH 064/123] r/aws_finspace_kx_dataview(doc): missed markdownlint --- website/docs/r/finspace_kx_dataview.html.markdown | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/docs/r/finspace_kx_dataview.html.markdown b/website/docs/r/finspace_kx_dataview.html.markdown index ad2d6b51049..f7f77da7742 100644 --- a/website/docs/r/finspace_kx_dataview.html.markdown +++ b/website/docs/r/finspace_kx_dataview.html.markdown @@ -36,8 +36,8 @@ resource "aws_finspace_kx_dataview" "example" { The following arguments are required: * `az_mode` - (Required) The number of availability zones you want to assign per cluster. This can be one of the following: - * `SINGLE` - Assigns one availability zone per cluster. - * `MULTI` - Assigns all the availability zones per cluster. + * `SINGLE` - Assigns one availability zone per cluster. + * `MULTI` - Assigns all the availability zones per cluster. * `database_name` - (Required) The name of the database where you want to create a dataview. * `environment_id` - (Required) Unique identifier for the KX environment. * `name` - (Required) A unique identifier for the dataview. 
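Note: the documentation touched above describes both `SINGLE` and `MULTI` values for `az_mode`, while the dataview acceptance tests in this series only exercise the single-AZ path. A sketch of what a multi-AZ test configuration could look like, in the style of `kx_dataview_test.go`, follows. The helper name `testAccKxDataviewConfig_multiAZ`, the shared `testAccKxDataviewConfigBase` helper, and the exact resource references are assumptions for illustration only, not code from these patches.

```go
// Illustrative sketch only: a test configuration helper that would exercise
// az_mode = "MULTI" for aws_finspace_kx_dataview. It assumes a shared base
// config (environment, database, volume) like the one used by the existing
// dataview tests.
func testAccKxDataviewConfig_multiAZ(rName string) string {
	return acctest.ConfigCompose(
		testAccKxDataviewConfigBase(rName),
		fmt.Sprintf(`
resource "aws_finspace_kx_dataview" "test" {
  name           = %[1]q
  environment_id = aws_finspace_kx_environment.test.id
  database_name  = aws_finspace_kx_database.test.name
  auto_update    = true
  az_mode        = "MULTI"

  segment_configurations {
    db_paths    = ["/*"]
    volume_name = aws_finspace_kx_volume.test.name
  }
}
`, rName))
}
```

Because `az_mode = "MULTI"` spans all availability zones, such a configuration would omit `availability_zone_id`; whether the single-AZ NAS volume used by the base config is accepted in that mode is an open question this sketch does not settle.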
From f46235636d1a666acb5caab2f8604a7491cc1d26 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 12:57:06 -0500 Subject: [PATCH 065/123] r/aws_finspace_kx_dataview: semgrep fix --- internal/service/finspace/kx_dataview.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/service/finspace/kx_dataview.go b/internal/service/finspace/kx_dataview.go index a269c6b1172..28e42c5fac6 100644 --- a/internal/service/finspace/kx_dataview.go +++ b/internal/service/finspace/kx_dataview.go @@ -375,7 +375,7 @@ func statusKxDataview(ctx context.Context, conn *finspace.Client, id string) ret } } -func expandDbPath(tfList []interface{}) []string { +func expandDBPath(tfList []interface{}) []string { if tfList == nil { return nil } @@ -397,7 +397,7 @@ func expandSegmentConfigurations(tfList []interface{}) []types.KxDataviewSegment m := v.(map[string]interface{}) s = append(s, types.KxDataviewSegmentConfiguration{ VolumeName: aws.String(m["volume_name"].(string)), - DbPaths: expandDbPath(m["db_paths"].([]interface{})), + DbPaths: expandDBPath(m["db_paths"].([]interface{})), }) } From 56bedf44ff4639874d9e166350e29376a1c7d667 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 12:58:09 -0500 Subject: [PATCH 066/123] r/aws_finspace_kx_dataview: importlint fix --- internal/service/finspace/kx_dataview.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/internal/service/finspace/kx_dataview.go b/internal/service/finspace/kx_dataview.go index 28e42c5fac6..07bc92180df 100644 --- a/internal/service/finspace/kx_dataview.go +++ b/internal/service/finspace/kx_dataview.go @@ -15,6 +15,8 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/enum" @@ -23,9 +25,6 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) // @SDKResource("aws_finspace_kx_dataview", name="Kx Dataview") From 95a4157290dc7ce51c8eeed999847bba618ef4aa Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 12:59:30 -0500 Subject: [PATCH 067/123] r/aws_finspace_kx_dataview(doc): fix title heading --- website/docs/r/finspace_kx_dataview.html.markdown | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/docs/r/finspace_kx_dataview.html.markdown b/website/docs/r/finspace_kx_dataview.html.markdown index f7f77da7742..44b77ca5aa9 100644 --- a/website/docs/r/finspace_kx_dataview.html.markdown +++ b/website/docs/r/finspace_kx_dataview.html.markdown @@ -3,10 +3,10 @@ subcategory: "FinSpace" layout: "aws" page_title: "AWS: aws_finspace_kx_dataview" description: |- - Terraform resource for managing an AWS FinSpace Kx Dataviewk. + Terraform resource for managing an AWS FinSpace Kx Dataview. 
--- -# Resource: aws_finspace_dataview +# Resource: aws_finspace_kx_dataview Terraform resource for managing an AWS FinSpace Kx Dataview. From d3ed118585662f0bbd6e2b377dd9d2713673ac0a Mon Sep 17 00:00:00 2001 From: changelogbot Date: Thu, 14 Dec 2023 18:46:26 +0000 Subject: [PATCH 068/123] Update CHANGELOG.md for #34915 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index fce62da4221..f41aab55250 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ FEATURES: * **New Data Source:** `aws_polly_voices` ([#34916](https://github.com/hashicorp/terraform-provider-aws/issues/34916)) * **New Data Source:** `aws_ssoadmin_application_assignments` ([#34796](https://github.com/hashicorp/terraform-provider-aws/issues/34796)) * **New Data Source:** `aws_ssoadmin_principal_application_assignments` ([#34815](https://github.com/hashicorp/terraform-provider-aws/issues/34815)) +* **New Resource:** `aws_finspace_kx_scaling_group` ([#34832](https://github.com/hashicorp/terraform-provider-aws/issues/34832)) * **New Resource:** `aws_ssoadmin_trusted_token_issuer` ([#34839](https://github.com/hashicorp/terraform-provider-aws/issues/34839)) ENHANCEMENTS: From 74cb6b501ba203ff0bc252beb59ac2e5f482e557 Mon Sep 17 00:00:00 2001 From: Tim Rogers Date: Tue, 16 May 2023 17:30:12 -0500 Subject: [PATCH 069/123] Added acceptance test for changing ALB stickiness type --- internal/service/elbv2/target_group_test.go | 93 +++++++++++++++++++++ 1 file changed, 93 insertions(+) diff --git a/internal/service/elbv2/target_group_test.go b/internal/service/elbv2/target_group_test.go index e1e6fba7ec3..5a5f81c053a 100644 --- a/internal/service/elbv2/target_group_test.go +++ b/internal/service/elbv2/target_group_test.go @@ -1520,6 +1520,99 @@ func TestAccELBV2TargetGroup_Stickiness_updateAppEnabled(t *testing.T) { }) } +func TestAccELBV2TargetGroup_Stickiness_updateStickinessType(t *testing.T) { + ctx := acctest.Context(t) + var conf elbv2.TargetGroup + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_lb_target_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, elbv2.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckTargetGroupDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccTargetGroupConfig_stickiness(rName, true, true), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTargetGroupExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttrSet(resourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "port", "443"), + resource.TestCheckResourceAttr(resourceName, "protocol", "HTTPS"), + resource.TestCheckResourceAttrSet(resourceName, "vpc_id"), + resource.TestCheckResourceAttr(resourceName, "deregistration_delay", "200"), + resource.TestCheckResourceAttr(resourceName, "stickiness.#", "1"), + resource.TestCheckResourceAttr(resourceName, "stickiness.0.enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "stickiness.0.type", "lb_cookie"), + resource.TestCheckResourceAttr(resourceName, "stickiness.0.cookie_duration", "10000"), + resource.TestCheckResourceAttr(resourceName, "health_check.#", "1"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.path", "/health2"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.interval", "30"), + 
resource.TestCheckResourceAttr(resourceName, "health_check.0.port", "8082"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.protocol", "HTTPS"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.timeout", "4"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.healthy_threshold", "4"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.unhealthy_threshold", "4"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.matcher", "200"), + ), + }, + { + Config: testAccTargetGroupConfig_appStickiness(rName, true, true), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTargetGroupExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttrSet(resourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "port", "443"), + resource.TestCheckResourceAttr(resourceName, "protocol", "HTTPS"), + resource.TestCheckResourceAttrSet(resourceName, "vpc_id"), + resource.TestCheckResourceAttr(resourceName, "deregistration_delay", "200"), + resource.TestCheckResourceAttr(resourceName, "stickiness.#", "1"), + resource.TestCheckResourceAttr(resourceName, "stickiness.0.enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "stickiness.0.type", "app_cookie"), + resource.TestCheckResourceAttr(resourceName, "stickiness.0.cookie_name", "Cookie"), + resource.TestCheckResourceAttr(resourceName, "stickiness.0.cookie_duration", "10000"), + resource.TestCheckResourceAttr(resourceName, "health_check.#", "1"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.path", "/health2"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.interval", "30"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.port", "8082"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.protocol", "HTTPS"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.timeout", "4"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.healthy_threshold", "4"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.unhealthy_threshold", "4"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.matcher", "200"), + ), + }, + { + Config: testAccTargetGroupConfig_stickiness(rName, true, true), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTargetGroupExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttrSet(resourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "port", "443"), + resource.TestCheckResourceAttr(resourceName, "protocol", "HTTPS"), + resource.TestCheckResourceAttrSet(resourceName, "vpc_id"), + resource.TestCheckResourceAttr(resourceName, "deregistration_delay", "200"), + resource.TestCheckResourceAttr(resourceName, "stickiness.#", "1"), + resource.TestCheckResourceAttr(resourceName, "stickiness.0.enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "stickiness.0.type", "lb_cookie"), + resource.TestCheckResourceAttr(resourceName, "stickiness.0.cookie_name", "Cookie"), + resource.TestCheckResourceAttr(resourceName, "stickiness.0.cookie_duration", "10000"), + resource.TestCheckResourceAttr(resourceName, "health_check.#", "1"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.path", "/health2"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.interval", "30"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.port", "8082"), + 
resource.TestCheckResourceAttr(resourceName, "health_check.0.protocol", "HTTPS"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.timeout", "4"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.healthy_threshold", "4"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.unhealthy_threshold", "4"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.matcher", "200"), + ), + }, + }, + }) +} + func TestAccELBV2TargetGroup_HealthCheck_update(t *testing.T) { ctx := acctest.Context(t) var conf elbv2.TargetGroup From 668fcd10ffbec5f91144c406f02b4fba4b25ea6c Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 14 Dec 2023 14:28:50 -0500 Subject: [PATCH 070/123] elbv2: Move constants. --- internal/service/elbv2/const.go | 26 ++++++++++++++++++++++++++ internal/service/elbv2/target_group.go | 20 -------------------- 2 files changed, 26 insertions(+), 20 deletions(-) diff --git a/internal/service/elbv2/const.go b/internal/service/elbv2/const.go index ebd7a7df1d8..6ac49a89db8 100644 --- a/internal/service/elbv2/const.go +++ b/internal/service/elbv2/const.go @@ -3,6 +3,16 @@ package elbv2 +import ( + "time" + + "github.com/aws/aws-sdk-go/service/elbv2" +) + +const ( + propagationTimeout = 2 * time.Minute +) + const ( errCodeValidationError = "ValidationError" @@ -80,3 +90,19 @@ func httpXFFHeaderProcessingMode_Values() []string { httpXFFHeaderProcessingModeRemove, } } + +func healthCheckProtocolEnumValues() []string { + return []string{ + elbv2.ProtocolEnumHttp, + elbv2.ProtocolEnumHttps, + elbv2.ProtocolEnumTcp, + } +} + +func protocolVersionEnumValues() []string { + return []string{ + "GRPC", + "HTTP1", + "HTTP2", + } +} diff --git a/internal/service/elbv2/target_group.go b/internal/service/elbv2/target_group.go index 5985a022985..ef5132402b0 100644 --- a/internal/service/elbv2/target_group.go +++ b/internal/service/elbv2/target_group.go @@ -32,26 +32,6 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -const ( - propagationTimeout = 2 * time.Minute -) - -func healthCheckProtocolEnumValues() []string { - return []string{ - elbv2.ProtocolEnumHttp, - elbv2.ProtocolEnumHttps, - elbv2.ProtocolEnumTcp, - } -} - -func protocolVersionEnumValues() []string { - return []string{ - "GRPC", - "HTTP1", - "HTTP2", - } -} - // @SDKResource("aws_alb_target_group", name="Target Group") // @SDKResource("aws_lb_target_group", name="Target Group") // @Tags(identifierAttribute="id") From ce68ad0fe44a79a050128338633622d71e1d6135 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 14:43:29 -0500 Subject: [PATCH 071/123] r/aws_finspace_kx_dataview: linter fixes --- internal/service/finspace/kx_dataview.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/internal/service/finspace/kx_dataview.go b/internal/service/finspace/kx_dataview.go index 07bc92180df..9618cd46bc3 100644 --- a/internal/service/finspace/kx_dataview.go +++ b/internal/service/finspace/kx_dataview.go @@ -30,7 +30,6 @@ import ( // @SDKResource("aws_finspace_kx_dataview", name="Kx Dataview") // @Tags(identifierAttribute="arn") func ResourceKxDataview() *schema.Resource { - return &schema.Resource{ CreateWithoutTimeout: resourceKxDataviewCreate, ReadWithoutTimeout: resourceKxDataviewRead, @@ -235,7 +234,7 @@ func resourceKxDataviewUpdate(ctx context.Context, d *schema.ResourceData, meta ClientToken: aws.String(id.UniqueId()), } - if v, ok := d.GetOk("changeset_id"); ok && d.HasChange("changeset_id") && d.Get("auto_update").(bool) != true { + if v, ok := 
d.GetOk("changeset_id"); ok && d.HasChange("changeset_id") && !d.Get("auto_update").(bool) { in.ChangesetId = aws.String(v.(string)) } @@ -300,7 +299,6 @@ func FindKxDataviewById(ctx context.Context, conn *finspace.Client, id string) ( LastError: err, LastRequest: in, } - } return nil, err } From 0c613614ef8bf613143cc00547214a37123c9fae Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 15:00:52 -0500 Subject: [PATCH 072/123] r/aws_finspace_kx_cluster: prefer create.AppendDiagError --- internal/service/finspace/kx_cluster.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/service/finspace/kx_cluster.go b/internal/service/finspace/kx_cluster.go index 6a6f26833e2..e00b091ffb4 100644 --- a/internal/service/finspace/kx_cluster.go +++ b/internal/service/finspace/kx_cluster.go @@ -589,11 +589,11 @@ func resourceKxClusterRead(ctx context.Context, d *schema.ResourceData, meta int } if err := d.Set("scaling_group_configuration", flattenScalingGroupConfiguration(out.ScalingGroupConfiguration)); err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionSetting, ResNameKxCluster, d.Id(), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionSetting, ResNameKxCluster, d.Id(), err) } if err := d.Set("tickerplant_log_configuration", flattenTickerplantLogConfiguration(out.TickerplantLogConfiguration)); err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionSetting, ResNameKxCluster, d.Id(), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionSetting, ResNameKxCluster, d.Id(), err) } // compose cluster ARN using environment ARN From 25339163223498e9188b4a9e108646f6e5861d60 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 15:03:03 -0500 Subject: [PATCH 073/123] r/aws_finspace_kx_cluster(doc): markdownlint fixes --- website/docs/r/finspace_kx_cluster.html.markdown | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/docs/r/finspace_kx_cluster.html.markdown b/website/docs/r/finspace_kx_cluster.html.markdown index f7d59e6d71a..13b06c8e474 100644 --- a/website/docs/r/finspace_kx_cluster.html.markdown +++ b/website/docs/r/finspace_kx_cluster.html.markdown @@ -93,7 +93,7 @@ The following arguments are optional: * `initialization_script` - (Optional) Path to Q program that will be run at launch of a cluster. This is a relative path within .zip file that contains the custom code, which will be loaded on the cluster. It must include the file name itself. For example, somedir/init.q. * `savedown_storage_configuration` - (Optional) Size and type of the temporary storage that is used to hold data during the savedown process. This parameter is required when you choose `type` as RDB. All the data written to this storage space is lost when the cluster node is restarted. See [savedown_storage_configuration](#savedown_storage_configuration). * `scaling_group_configuration` - (Optional) The structure that stores the configuration details of a scaling group. -* `tickerplant_log_configuration` - A configuration to store Tickerplant logs. It consists of a list of volumes that will be mounted to your cluster. For the cluster type Tickerplant , the location of the TP volume on the cluster will be available by using the global variable .aws.tp_log_path. +* `tickerplant_log_configuration` - A configuration to store Tickerplant logs. It consists of a list of volumes that will be mounted to your cluster. 
For the cluster type Tickerplant , the location of the TP volume on the cluster will be available by using the global variable .aws.tp_log_path. * `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ### auto_scaling_configuration @@ -151,7 +151,7 @@ The database block supports the following arguments: * `database_name` - (Required) Name of the KX database. * `cache_configurations` - (Optional) Configuration details for the disk cache to increase performance reading from a KX database mounted to the cluster. See [cache_configurations](#cache_configurations). * `changeset_id` - (Optional) A unique identifier of the changeset that is associated with the cluster. -* `dataview_name` - (Optional) The name of the dataview to be used for caching historical data on disk. You cannot update to a different dataview name once a cluster is created. Use `lifecycle` [`ignore_changes`](https://www.terraform.io/docs/configuration/meta-arguments/lifecycle.html#ignore_changes) for database to prevent any undesirable behaviors. +* `dataview_name` - (Optional) The name of the dataview to be used for caching historical data on disk. You cannot update to a different dataview name once a cluster is created. Use `lifecycle` [`ignore_changes`](https://www.terraform.io/docs/configuration/meta-arguments/lifecycle.html#ignore_changes) for database to prevent any undesirable behaviors. #### cache_configurations @@ -184,7 +184,7 @@ The vpc_configuration block supports the following arguments: * `memory_reservation` - (Required) A reservation of the minimum amount of memory that should be available on the scaling group for a kdb cluster to be successfully placed in a scaling group. * `node_count` - (Required) The number of kdb cluster nodes. * `cpu` - The number of vCPUs that you want to reserve for each node of this kdb cluster on the scaling group host. -* `memory_limit` - An optional hard limit on the amount of memory a kdb cluster can use. +* `memory_limit` - An optional hard limit on the amount of memory a kdb cluster can use. 
### tickerplant_log_configuration From eaa4f07d225476dd86cb46e10e231e840da4ea7c Mon Sep 17 00:00:00 2001 From: Adrian Johnson Date: Thu, 14 Dec 2023 14:04:12 -0600 Subject: [PATCH 074/123] aws_ecr_image: use defined Find func --- internal/service/ecr/image_data_source.go | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/internal/service/ecr/image_data_source.go b/internal/service/ecr/image_data_source.go index 96bf9df4910..42ee4ff20d1 100644 --- a/internal/service/ecr/image_data_source.go +++ b/internal/service/ecr/image_data_source.go @@ -5,6 +5,7 @@ package ecr import ( "context" + "fmt" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ecr" @@ -133,29 +134,24 @@ func dataSourceImageRead(ctx context.Context, d *schema.ResourceData, meta inter imageDetail := imageDetails[0] - params2 := &ecr.DescribeRepositoriesInput{ - RepositoryNames: []*string{imageDetail.RepositoryName}, + repositoryName := aws.StringValue(imageDetail.RepositoryName) + repositoryInput := &ecr.DescribeRepositoriesInput{ + RepositoryNames: aws.StringSlice([]string{repositoryName}), RegistryId: imageDetail.RegistryId, } - var repositoryDetails []*ecr.Repository - err2 := conn.DescribeRepositoriesPages(params2, func(page *ecr.DescribeRepositoriesOutput, lastPage bool) bool { - repositoryDetails = append(repositoryDetails, page.Repositories...) - return true - }) + repository, err := FindRepository(ctx, conn, repositoryInput) - if err2 != nil { - return sdkdiag.AppendErrorf(diags, "reading ECR repositories: %s", err) + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading ECR Images: %s", err) } - repository := repositoryDetails[0] - d.SetId(aws.StringValue(imageDetail.ImageDigest)) d.Set("image_digest", imageDetail.ImageDigest) d.Set("image_pushed_at", imageDetail.ImagePushedAt.Unix()) d.Set("image_size_in_bytes", imageDetail.ImageSizeInBytes) d.Set("image_tags", aws.StringValueSlice(imageDetail.ImageTags)) - d.Set("image_uri", aws.String(aws.StringValue(repository.RepositoryUri)+"@"+aws.StringValue(imageDetail.ImageDigest))) + d.Set("image_uri", fmt.Sprintf("%s@%s", aws.StringValue(repository.RepositoryUri), aws.StringValue(imageDetail.ImageDigest))) d.Set("registry_id", imageDetail.RegistryId) d.Set("repository_name", imageDetail.RepositoryName) From 9f3f49b57d99d4a4fdaf7e18909f36c7799437b0 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 15:06:39 -0500 Subject: [PATCH 075/123] chore: changelog --- .changelog/34831.txt | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 .changelog/34831.txt diff --git a/.changelog/34831.txt b/.changelog/34831.txt new file mode 100644 index 00000000000..e7c0098c35a --- /dev/null +++ b/.changelog/34831.txt @@ -0,0 +1,6 @@ +```release-note:enhancement +resource/aws_finspace_kx_cluster: Add `database.dataview_name`, `scaling_group_configuration`, and `tickerplant_log_configuration` arguments. +``` +```release-note:enhancement +resource/aws_finspace_kx_cluster: The `capacity_configuration` argument is now optional. +``` From 81c0e43bf4575371000fb65a5e8df4bee5ed4a64 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 14 Dec 2023 15:17:43 -0500 Subject: [PATCH 076/123] r/aws_lb_target_group: Tidy up Delete. 
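Replace the hand-rolled `retry.RetryContext` loop in `resourceTargetGroupDelete` with the shared `tfresource.RetryWhenAWSErrMessageContains` helper, so the delete call is retried for a bounded time while a listener or rule still references the group, and use the SDK's `elbv2.ErrCodeResourceInUseException` constant instead of a string literal. A minimal sketch of the resulting call shape; the standalone wrapper function and its name are assumed purely for illustration:

```go
// Minimal sketch: retry an AWS call for a bounded time while the returned
// error carries a given error code and message substring. Only the helper's
// call shape matters here; the wrapper itself is not part of this change.
func deleteTargetGroup(ctx context.Context, conn *elbv2.ELBV2, arn string) error {
	const timeout = 2 * time.Minute

	_, err := tfresource.RetryWhenAWSErrMessageContains(ctx, timeout,
		func() (interface{}, error) {
			return conn.DeleteTargetGroupWithContext(ctx, &elbv2.DeleteTargetGroupInput{
				TargetGroupArn: aws.String(arn),
			})
		},
		elbv2.ErrCodeResourceInUseException, "is currently in use by a listener or a rule")

	return err
}
```

The helper also removes the separate `tfresource.TimedOut` re-invocation that the old loop needed, keeping the retry and timeout semantics in one place.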
--- internal/service/elbv2/const.go | 4 +++ internal/service/elbv2/target_group.go | 37 +++++++------------------- 2 files changed, 13 insertions(+), 28 deletions(-) diff --git a/internal/service/elbv2/const.go b/internal/service/elbv2/const.go index 6ac49a89db8..111a320a0eb 100644 --- a/internal/service/elbv2/const.go +++ b/internal/service/elbv2/const.go @@ -91,6 +91,10 @@ func httpXFFHeaderProcessingMode_Values() []string { } } +const ( + healthCheckPortTrafficPort = "traffic-port" +) + func healthCheckProtocolEnumValues() []string { return []string{ elbv2.ProtocolEnumHttp, diff --git a/internal/service/elbv2/target_group.go b/internal/service/elbv2/target_group.go index ef5132402b0..1f0777d28a2 100644 --- a/internal/service/elbv2/target_group.go +++ b/internal/service/elbv2/target_group.go @@ -114,7 +114,7 @@ func ResourceTargetGroup() *schema.Resource { "port": { Type: schema.TypeString, Optional: true, - Default: "traffic-port", + Default: healthCheckPortTrafficPort, ValidateFunc: validTargetGroupHealthCheckPort, DiffSuppressFunc: suppressIfTargetType(elbv2.TargetTypeEnumLambda), }, @@ -877,36 +877,17 @@ func resourceTargetGroupUpdate(ctx context.Context, d *schema.ResourceData, meta func resourceTargetGroupDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - const ( - targetGroupDeleteTimeout = 2 * time.Minute - ) conn := meta.(*conns.AWSClient).ELBV2Conn(ctx) - input := &elbv2.DeleteTargetGroupInput{ - TargetGroupArn: aws.String(d.Id()), - } - - log.Printf("[DEBUG] Deleting Target Group (%s): %s", d.Id(), input) - err := retry.RetryContext(ctx, targetGroupDeleteTimeout, func() *retry.RetryError { - _, err := conn.DeleteTargetGroupWithContext(ctx, input) - - if tfawserr.ErrMessageContains(err, "ResourceInUse", "is currently in use by a listener or a rule") { - return retry.RetryableError(err) - } - - if err != nil { - return retry.NonRetryableError(err) - } - - return nil - }) - - if tfresource.TimedOut(err) { - _, err = conn.DeleteTargetGroupWithContext(ctx, input) - } + log.Printf("[DEBUG] Deleting ELBv2 Target Group: %s", d.Id()) + _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, 2*time.Minute, func() (interface{}, error) { + return conn.DeleteTargetGroupWithContext(ctx, &elbv2.DeleteTargetGroupInput{ + TargetGroupArn: aws.String(d.Id()), + }) + }, elbv2.ErrCodeResourceInUseException, "is currently in use by a listener or a rule") if err != nil { - return sdkdiag.AppendErrorf(diags, "deleting Target Group: %s", err) + return sdkdiag.AppendErrorf(diags, "deleting ELBv2 Target Group (%s): %s", d.Id(), err) } return diags @@ -1028,7 +1009,7 @@ func validateSlowStart(v interface{}, k string) (ws []string, errors []error) { func validTargetGroupHealthCheckPort(v interface{}, k string) (ws []string, errors []error) { value := v.(string) - if value == "traffic-port" { + if value == healthCheckPortTrafficPort { return } From ddce4ea724808ac1d12ab021d10d31083e127892 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 14 Dec 2023 15:22:41 -0500 Subject: [PATCH 077/123] Tidy up 'findTargetGroup'. 
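Introduce an unexported `findTargetGroup` that wraps `findTargetGroups` and collapses the paginated result to a single element, so empty and multiple matches surface as errors from one place instead of being re-checked at every call site. A sketch of how a caller can lean on the not-found semantics of the singular finders; the wrapper function and error messages below are assumed for illustration:

```go
// Sketch of a typical caller: the singular finder reports "no match" through
// tfresource's NotFound error, so existence checks stay short. Function name
// and messages are illustrative, not part of this change.
func ensureTargetGroupNameAvailable(ctx context.Context, conn *elbv2.ELBV2, name string) error {
	tg, err := findTargetGroupByName(ctx, conn, name)

	switch {
	case tfresource.NotFound(err):
		return nil // no target group with this name exists
	case err != nil:
		return fmt.Errorf("reading ELBv2 Target Group (%s): %w", name, err)
	default:
		return fmt.Errorf("ELBv2 Target Group (%s) already exists: %s", name, aws.StringValue(tg.TargetGroupArn))
	}
}
```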
--- internal/service/elbv2/target_group.go | 38 ++++++++----------- .../service/elbv2/target_group_data_source.go | 2 +- 2 files changed, 16 insertions(+), 24 deletions(-) diff --git a/internal/service/elbv2/target_group.go b/internal/service/elbv2/target_group.go index 1f0777d28a2..48c0151be64 100644 --- a/internal/service/elbv2/target_group.go +++ b/internal/service/elbv2/target_group.go @@ -365,7 +365,7 @@ func resourceTargetGroupCreate(ctx context.Context, d *schema.ResourceData, meta create.WithConfiguredPrefix(d.Get("name_prefix").(string)), create.WithDefaultPrefix("tf-"), ).Generate() - exist, err := FindTargetGroupByName(ctx, conn, name) + exist, err := findTargetGroupByName(ctx, conn, name) if err != nil && !tfresource.NotFound(err) { return sdkdiag.AppendErrorf(diags, "reading ELBv2 Target Group (%s): %s", name, err) @@ -898,7 +898,7 @@ func FindTargetGroupByARN(ctx context.Context, conn *elbv2.ELBV2, arn string) (* TargetGroupArns: aws.StringSlice([]string{arn}), } - output, err := FindTargetGroup(ctx, conn, input) + output, err := findTargetGroup(ctx, conn, input) if err != nil { return nil, err @@ -914,12 +914,12 @@ func FindTargetGroupByARN(ctx context.Context, conn *elbv2.ELBV2, arn string) (* return output, nil } -func FindTargetGroupByName(ctx context.Context, conn *elbv2.ELBV2, name string) (*elbv2.TargetGroup, error) { +func findTargetGroupByName(ctx context.Context, conn *elbv2.ELBV2, name string) (*elbv2.TargetGroup, error) { input := &elbv2.DescribeTargetGroupsInput{ Names: aws.StringSlice([]string{name}), } - output, err := FindTargetGroup(ctx, conn, input) + output, err := findTargetGroup(ctx, conn, input) if err != nil { return nil, err @@ -935,7 +935,17 @@ func FindTargetGroupByName(ctx context.Context, conn *elbv2.ELBV2, name string) return output, nil } -func FindTargetGroups(ctx context.Context, conn *elbv2.ELBV2, input *elbv2.DescribeTargetGroupsInput) ([]*elbv2.TargetGroup, error) { +func findTargetGroup(ctx context.Context, conn *elbv2.ELBV2, input *elbv2.DescribeTargetGroupsInput) (*elbv2.TargetGroup, error) { + output, err := findTargetGroups(ctx, conn, input) + + if err != nil { + return nil, err + } + + return tfresource.AssertSinglePtrResult(output) +} + +func findTargetGroups(ctx context.Context, conn *elbv2.ELBV2, input *elbv2.DescribeTargetGroupsInput) ([]*elbv2.TargetGroup, error) { var output []*elbv2.TargetGroup err := conn.DescribeTargetGroupsPagesWithContext(ctx, input, func(page *elbv2.DescribeTargetGroupsOutput, lastPage bool) bool { @@ -966,24 +976,6 @@ func FindTargetGroups(ctx context.Context, conn *elbv2.ELBV2, input *elbv2.Descr return output, nil } -func FindTargetGroup(ctx context.Context, conn *elbv2.ELBV2, input *elbv2.DescribeTargetGroupsInput) (*elbv2.TargetGroup, error) { - output, err := FindTargetGroups(ctx, conn, input) - - if err != nil { - return nil, err - } - - if len(output) == 0 || output[0] == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - if count := len(output); count > 1 { - return nil, tfresource.NewTooManyResultsError(count, input) - } - - return output[0], nil -} - func validTargetGroupHealthCheckPath(v interface{}, k string) (ws []string, errors []error) { value := v.(string) if !strings.HasPrefix(value, "/") { diff --git a/internal/service/elbv2/target_group_data_source.go b/internal/service/elbv2/target_group_data_source.go index e8ecfe7e5d5..1dd94c9293c 100644 --- a/internal/service/elbv2/target_group_data_source.go +++ b/internal/service/elbv2/target_group_data_source.go @@ -184,7 +184,7 @@ 
func dataSourceTargetGroupRead(ctx context.Context, d *schema.ResourceData, meta input.Names = aws.StringSlice([]string{v.(string)}) } - results, err := FindTargetGroups(ctx, conn, input) + results, err := findTargetGroups(ctx, conn, input) if err != nil { return sdkdiag.AppendErrorf(diags, "reading ELBv2 Target Groups: %s", err) From 3ae4ffea43f50fc7a66c1c12bea020edd667466b Mon Sep 17 00:00:00 2001 From: Adrian Johnson Date: Thu, 14 Dec 2023 14:37:30 -0600 Subject: [PATCH 078/123] aws_ecr_image: find most recent --- internal/service/ecr/image_data_source.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/internal/service/ecr/image_data_source.go b/internal/service/ecr/image_data_source.go index 42ee4ff20d1..8932629b1c0 100644 --- a/internal/service/ecr/image_data_source.go +++ b/internal/service/ecr/image_data_source.go @@ -102,6 +102,18 @@ func dataSourceImageRead(ctx context.Context, d *schema.ResourceData, meta inter } } + if v, ok := d.Get("most_recent").(bool); ok && v { + if len(input.ImageIds) == 0 { + input.ImageIds = []*ecr.ImageIdentifier{ + { + ImageTag: aws.String("latest"), + }, + } + } else { + input.ImageIds[0].ImageTag = aws.String("latest") + } + } + if v, ok := d.GetOk("registry_id"); ok { input.RegistryId = aws.String(v.(string)) } From 434d9b4c3a25b528e2b158ce0703c340e76a0bed Mon Sep 17 00:00:00 2001 From: Adrian Johnson Date: Thu, 14 Dec 2023 14:45:32 -0600 Subject: [PATCH 079/123] tweak CHANGELOG entry --- .changelog/24526.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.changelog/24526.txt b/.changelog/24526.txt index 7b392ba8fb6..92988183f15 100644 --- a/.changelog/24526.txt +++ b/.changelog/24526.txt @@ -1,3 +1,3 @@ ```release-note:enhancement -data-source/aws_ecr_image: Add image_uri attribute +data-source/aws_ecr_image: Add `image_uri` attribute ``` \ No newline at end of file From 6ca793b90547331e50f124d93e2ec6348036f4f0 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 16:08:12 -0500 Subject: [PATCH 080/123] r/aws_finspace_kx_volume(test): add tags test (#34928) --- internal/service/finspace/kx_volume_test.go | 101 ++++++++++++++++++++ 1 file changed, 101 insertions(+) diff --git a/internal/service/finspace/kx_volume_test.go b/internal/service/finspace/kx_volume_test.go index fa80b8b039a..9a3ad657dcf 100644 --- a/internal/service/finspace/kx_volume_test.go +++ b/internal/service/finspace/kx_volume_test.go @@ -88,6 +88,62 @@ func TestAccFinSpaceKxVolume_disappears(t *testing.T) { }) } +func TestAccFinSpaceKxVolume_tags(t *testing.T) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + ctx := acctest.Context(t) + var volume finspace.GetKxVolumeOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_finspace_kx_volume.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, finspace.ServiceID) + }, + ErrorCheck: acctest.ErrorCheck(t, finspace.ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckKxVolumeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccKxVolumeConfig_tags1(rName, "key1", "value1"), + Check: resource.ComposeTestCheckFunc( + testAccCheckKxVolumeExists(ctx, resourceName, &volume), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), + ), + 
}, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccKxVolumeConfig_tags2(rName, "key1", "value1updated", "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckKxVolumeExists(ctx, resourceName, &volume), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + { + Config: testAccKxVolumeConfig_tags1(rName, "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckKxVolumeExists(ctx, resourceName, &volume), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + }, + }) +} + func testAccCheckKxVolumeDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) @@ -264,3 +320,48 @@ resource "aws_finspace_kx_volume" "test" { } `, rName)) } + +func testAccKxVolumeConfig_tags1(rName, key1, value1 string) string { + return acctest.ConfigCompose( + testAccKxVolumeConfigBase(rName), + fmt.Sprintf(` +resource "aws_finspace_kx_volume" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + availability_zones = [aws_finspace_kx_environment.test.availability_zones[0]] + az_mode = "SINGLE" + type = "NAS_1" + nas1_configuration { + type = "SSD_250" + size = 1200 + } + + tags = { + %[2]q = %[3]q + } +} +`, rName, key1, value1)) +} + +func testAccKxVolumeConfig_tags2(rName, key1, value1, key2, value2 string) string { + return acctest.ConfigCompose( + testAccKxVolumeConfigBase(rName), + fmt.Sprintf(` +resource "aws_finspace_kx_volume" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + availability_zones = [aws_finspace_kx_environment.test.availability_zones[0]] + az_mode = "SINGLE" + type = "NAS_1" + nas1_configuration { + type = "SSD_250" + size = 1200 + } + + tags = { + %[2]q = %[3]q + %[4]q = %[5]q + } +} +`, rName, key1, value1, key2, value2)) +} From a3a41794e6a48ecc14744ce02e75c5d4c28e8590 Mon Sep 17 00:00:00 2001 From: changelogbot Date: Thu, 14 Dec 2023 21:10:26 +0000 Subject: [PATCH 081/123] Update CHANGELOG.md for #34928 --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f41aab55250..9d1842bcdde 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,7 +5,9 @@ FEATURES: * **New Data Source:** `aws_polly_voices` ([#34916](https://github.com/hashicorp/terraform-provider-aws/issues/34916)) * **New Data Source:** `aws_ssoadmin_application_assignments` ([#34796](https://github.com/hashicorp/terraform-provider-aws/issues/34796)) * **New Data Source:** `aws_ssoadmin_principal_application_assignments` ([#34815](https://github.com/hashicorp/terraform-provider-aws/issues/34815)) +* **New Resource:** `aws_finspace_kx_dataview` ([#34828](https://github.com/hashicorp/terraform-provider-aws/issues/34828)) * **New Resource:** `aws_finspace_kx_scaling_group` ([#34832](https://github.com/hashicorp/terraform-provider-aws/issues/34832)) +* **New Resource:** `aws_finspace_kx_volume` ([#34833](https://github.com/hashicorp/terraform-provider-aws/issues/34833)) * **New Resource:** `aws_ssoadmin_trusted_token_issuer` 
([#34839](https://github.com/hashicorp/terraform-provider-aws/issues/34839)) ENHANCEMENTS: From 91060cc76aa5119a4ba2a9f1924845e49a5a0ad3 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 14 Dec 2023 16:39:19 -0500 Subject: [PATCH 082/123] r/aws_lb_target_group: Tidy up Create. --- internal/service/elbv2/const.go | 12 +++-- internal/service/elbv2/target_group.go | 61 ++++++++++++-------------- 2 files changed, 36 insertions(+), 37 deletions(-) diff --git a/internal/service/elbv2/const.go b/internal/service/elbv2/const.go index 111a320a0eb..c771aeccee0 100644 --- a/internal/service/elbv2/const.go +++ b/internal/service/elbv2/const.go @@ -103,10 +103,16 @@ func healthCheckProtocolEnumValues() []string { } } +const ( + protocolVersionGRPC = "GRPC" + protocolVersionHTTP1 = "HTTP1" + protocolVersionHTTP2 = "HTTP2" +) + func protocolVersionEnumValues() []string { return []string{ - "GRPC", - "HTTP1", - "HTTP2", + protocolVersionGRPC, + protocolVersionHTTP1, + protocolVersionHTTP2, } } diff --git a/internal/service/elbv2/target_group.go b/internal/service/elbv2/target_group.go index 48c0151be64..cb5ff6f3c77 100644 --- a/internal/service/elbv2/target_group.go +++ b/internal/service/elbv2/target_group.go @@ -383,60 +383,57 @@ func resourceTargetGroupCreate(ctx context.Context, d *schema.ResourceData, meta TargetType: aws.String(d.Get("target_type").(string)), } - if d.Get("target_type").(string) != elbv2.TargetTypeEnumLambda { + if targetType := d.Get("target_type").(string); targetType != elbv2.TargetTypeEnumLambda { input.Port = aws.Int64(int64(d.Get("port").(int))) - input.Protocol = aws.String(d.Get("protocol").(string)) - switch d.Get("protocol").(string) { + protocol := d.Get("protocol").(string) + input.Protocol = aws.String(protocol) + switch protocol { case elbv2.ProtocolEnumHttp, elbv2.ProtocolEnumHttps: input.ProtocolVersion = aws.String(d.Get("protocol_version").(string)) } input.VpcId = aws.String(d.Get("vpc_id").(string)) - if d.Get("target_type").(string) == elbv2.TargetTypeEnumIp { - if _, ok := d.GetOk("ip_address_type"); ok { - input.IpAddressType = aws.String(d.Get("ip_address_type").(string)) + if targetType == elbv2.TargetTypeEnumIp { + if v, ok := d.GetOk("ip_address_type"); ok { + input.IpAddressType = aws.String(v.(string)) } } } - if healthChecks := d.Get("health_check").([]interface{}); len(healthChecks) == 1 { - healthCheck := healthChecks[0].(map[string]interface{}) - - input.HealthCheckEnabled = aws.Bool(healthCheck["enabled"].(bool)) + if v, ok := d.GetOk("health_check"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + tfMap := v.([]interface{})[0].(map[string]interface{}) - input.HealthCheckIntervalSeconds = aws.Int64(int64(healthCheck["interval"].(int))) + input.HealthCheckEnabled = aws.Bool(tfMap["enabled"].(bool)) + input.HealthCheckIntervalSeconds = aws.Int64(int64(tfMap["interval"].(int))) + input.HealthyThresholdCount = aws.Int64(int64(tfMap["healthy_threshold"].(int))) + input.UnhealthyThresholdCount = aws.Int64(int64(tfMap["unhealthy_threshold"].(int))) - input.HealthyThresholdCount = aws.Int64(int64(healthCheck["healthy_threshold"].(int))) - input.UnhealthyThresholdCount = aws.Int64(int64(healthCheck["unhealthy_threshold"].(int))) - t := healthCheck["timeout"].(int) - if t != 0 { - input.HealthCheckTimeoutSeconds = aws.Int64(int64(t)) + if v, ok := tfMap["timeout"].(int); ok && v != 0 { + input.HealthCheckTimeoutSeconds = aws.Int64(int64(v)) } - healthCheckProtocol := healthCheck["protocol"].(string) - if healthCheckProtocol != 
elbv2.ProtocolEnumTcp { - p := healthCheck["path"].(string) - if p != "" { - input.HealthCheckPath = aws.String(p) + protocol := tfMap["protocol"].(string) + if protocol != elbv2.ProtocolEnumTcp { + if v, ok := tfMap["path"].(string); ok && v != "" { + input.HealthCheckPath = aws.String(v) } - m := healthCheck["matcher"].(string) - protocolVersion := d.Get("protocol_version").(string) - if m != "" { - if protocolVersion == "GRPC" { + if v, ok := tfMap["matcher"].(string); ok && v != "" { + if protocolVersion := d.Get("protocol_version").(string); protocolVersion == protocolVersionGRPC { input.Matcher = &elbv2.Matcher{ - GrpcCode: aws.String(m), + GrpcCode: aws.String(v), } } else { input.Matcher = &elbv2.Matcher{ - HttpCode: aws.String(m), + HttpCode: aws.String(v), } } } } - if d.Get("target_type").(string) != elbv2.TargetTypeEnumLambda { - input.HealthCheckPort = aws.String(healthCheck["port"].(string)) - input.HealthCheckProtocol = aws.String(healthCheckProtocol) + + if targetType := d.Get("target_type").(string); targetType != elbv2.TargetTypeEnumLambda { + input.HealthCheckPort = aws.String(tfMap["port"].(string)) + input.HealthCheckProtocol = aws.String(protocol) } } @@ -461,10 +458,6 @@ func resourceTargetGroupCreate(ctx context.Context, d *schema.ResourceData, meta return sdkdiag.AppendErrorf(diags, "creating ELBv2 Target Group (%s): %s", name, err) } - if len(output.TargetGroups) == 0 { - return sdkdiag.AppendErrorf(diags, "creating LB Target Group: no groups returned in response") - } - d.SetId(aws.StringValue(output.TargetGroups[0].TargetGroupArn)) _, err = tfresource.RetryWhenNotFound(ctx, propagationTimeout, func() (interface{}, error) { From 43d401bce33f9cfd0b401659e9af3383a75647c9 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 14 Dec 2023 17:19:26 -0500 Subject: [PATCH 083/123] r/aws_lb_target_group: Add attribute constants. --- internal/service/elbv2/const.go | 95 ++++++++++++++++++++++++++ internal/service/elbv2/target_group.go | 53 +++++--------- 2 files changed, 112 insertions(+), 36 deletions(-) diff --git a/internal/service/elbv2/const.go b/internal/service/elbv2/const.go index c771aeccee0..87ca3b2676e 100644 --- a/internal/service/elbv2/const.go +++ b/internal/service/elbv2/const.go @@ -91,6 +91,101 @@ func httpXFFHeaderProcessingMode_Values() []string { } } +// See https://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_TargetGroupAttribute.html#API_TargetGroupAttribute_Contents. 
+const ( + // The following attributes are supported by all load balancers: + targetGroupAttributeDeregistrationDelayTimeoutSeconds = "deregistration_delay.timeout_seconds" + targetGroupAttributeDeregistrationStickinessEnabled = "stickiness.enabled" + targetGroupAttributeDeregistrationStickinessType = "stickiness.enabled" + + // The following attributes are supported by Application Load Balancers and Network Load Balancers: + targetGroupAttributeLoadBalancingCrossZoneEnabled = "load_balancing.cross_zone.enabled" + targetGroupAttributeTargetGroupHealthDNSFailoverMinimumHealthyTargetsCount = "target_group_health.dns_failover.minimum_healthy_targets.count" + targetGroupAttributeTargetGroupHealthDNSFailoverMinimumHealthyTargetsPercentage = "target_group_health.dns_failover.minimum_healthy_targets.percentage" + targetGroupAttributeTargetGroupHealthUnhealthyStateRoutingMinimumHealthyTargetsCount = "target_group_health.unhealthy_state_routing.minimum_healthy_targets.count" + targetGroupAttributeTargetGroupHealthUnhealthyStateRoutingMinimumHealthyTargetsPercentage = "target_group_health.unhealthy_state_routing.minimum_healthy_targets.percentage" + + // The following attributes are supported only if the load balancer is an Application Load Balancer and the target is an instance or an IP address: + targetGroupAttributeLoadBalancingAlgorithmType = "load_balancing.algorithm.type" + targetGroupAttributeLoadBalancingAlgorithmAnomalyMitigation = "load_balancing.algorithm.anomaly_mitigation" + targetGroupAttributeSlowStartDurationSeconds = "slow_start.duration_seconds" + targetGroupAttributeStickinessAppCookieCookieName = "stickiness.app_cookie.cookie_name" + targetGroupAttributeStickinessAppCookieDurationSeconds = "stickiness.app_cookie.duration_seconds" + targetGroupAttributeStickinessLBCookieDurationSeconds = "stickiness.lb_cookie.duration_seconds" + + // The following attribute is supported only if the load balancer is an Application Load Balancer and the target is a Lambda function: + targetGroupAttributeLambdaMultiValueHeadersEnabled = "lambda.multi_value_headers.enabled" + + // The following attributes are supported only by Network Load Balancers: + targetGroupAttributeDeregistrationDelayConnectionTerminationEnabled = "deregistration_delay.connection_termination.enabled" + targetGroupAttributePreserveClientIPEnabled = "preserve_client_ip.enabled" + targetGroupAttributeProxyProtocolV2Enabled = "proxy_protocol_v2.enabled" + targetGroupAttributeTargetHealthStateUnhealthyConnectionTerminationEnabled = "target_health_state.unhealthy.connection_termination.enabled" + + // The following attributes are supported only by Gateway Load Balancers: + targetGroupAttributeTargetFailoverOnDeregistration = "target_failover.on_deregistration" + targetGroupAttributeTargetFailoverOnUnhealthy = "target_failover.on_unhealthy" +) + +const ( + loadBalancingAlgorithmTypeRoundRobin = "round_robin" + loadBalancingAlgorithmTypeLeastOutstandingRequests = "least_outstanding_requests" + loadBalancingAlgorithmTypeWeightedRandom = "weighted_random" +) + +func loadBalancingAlgorithmType_Values() []string { + return []string{ + loadBalancingAlgorithmTypeRoundRobin, + loadBalancingAlgorithmTypeLeastOutstandingRequests, + // TODO + // loadBalancingAlgorithmTypeWeightedRandom, + } +} + +const ( + loadBalancingCrossZoneEnabledTrue = "true" + loadBalancingCrossZoneEnabledFalse = "false" + loadBalancingCrossZoneEnabledUseLoadBalancerConfiguration = "use_load_balancer_configuration" +) + +func loadBalancingCrossZoneEnabled_Values() 
[]string { + return []string{ + loadBalancingCrossZoneEnabledTrue, + loadBalancingCrossZoneEnabledFalse, + loadBalancingCrossZoneEnabledUseLoadBalancerConfiguration, + } +} + +const ( + stickinessTypeLBCookie = "lb_cookie" // Only for ALBs + stickinessTypeAppCookie = "app_cookie" // Only for ALBs + stickinessTypeSourceIP = "source_ip" // Only for NLBs + stickinessTypeSourceIPDestIP = "source_ip_dest_ip" // Only for GWLBs + stickinessTypeSourceIPDestIPProto = "source_ip_dest_ip_proto" // Only for GWLBs +) + +func stickinessType_Values() []string { + return []string{ + stickinessTypeLBCookie, + stickinessTypeAppCookie, + stickinessTypeSourceIP, + stickinessTypeSourceIPDestIP, + stickinessTypeSourceIPDestIPProto, + } +} + +const ( + targetFailoverRebalance = "rebalance" + targetFailoverNoRebalance = "no_rebalance" +) + +func targetFailover_Values() []string { + return []string{ + targetFailoverRebalance, + targetFailoverNoRebalance, + } +} + const ( healthCheckPortTrafficPort = "traffic-port" ) diff --git a/internal/service/elbv2/target_group.go b/internal/service/elbv2/target_group.go index cb5ff6f3c77..df6b14cb5b1 100644 --- a/internal/service/elbv2/target_group.go +++ b/internal/service/elbv2/target_group.go @@ -156,23 +156,16 @@ func ResourceTargetGroup() *schema.Resource { Default: false, }, "load_balancing_algorithm_type": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validation.StringInSlice([]string{ - "round_robin", - "least_outstanding_requests", - }, false), + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice(loadBalancingAlgorithmType_Values(), false), }, "load_balancing_cross_zone_enabled": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validation.StringInSlice([]string{ - "true", - "false", - "use_load_balancer_configuration", - }, false), + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice(loadBalancingCrossZoneEnabled_Values(), false), }, "name": { Type: schema.TypeString, @@ -276,15 +269,9 @@ func ResourceTargetGroup() *schema.Resource { Default: true, }, "type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{ - "lb_cookie", // Only for ALBs - "app_cookie", // Only for ALBs - "source_ip", // Only for NLBs - "source_ip_dest_ip", // Only for GWLBs - "source_ip_dest_ip_proto", // Only for GWLBs - }, false), + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(stickinessType_Values(), false), }, }, }, @@ -298,20 +285,14 @@ func ResourceTargetGroup() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "on_deregistration": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{ - "rebalance", - "no_rebalance", - }, false), + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(targetFailover_Values(), false), }, "on_unhealthy": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{ - "rebalance", - "no_rebalance", - }, false), + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(targetFailover_Values(), false), }, }, }, From 7f04626a6596e1a9ea8df1bc9675ea2a5bf399ab Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 14 Dec 2023 17:33:51 -0500 Subject: [PATCH 084/123] r/aws_lb_target_group: Tidy up Update. 
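Rework the `health_check` branch of `resourceTargetGroupUpdate` to mirror the create path: the matcher is applied for non-TCP health-check protocols, with `Matcher.GrpcCode` used when `protocol_version` is `GRPC` and `Matcher.HttpCode` otherwise. For readability, that branch is equivalent to a small helper along these lines; the helper itself is only a sketch and is not part of this change:

```go
// Sketch only: pick the elbv2.Matcher field based on the target group's
// protocol_version, exactly as the inline branch does.
func healthCheckMatcher(protocolVersion, matcher string) *elbv2.Matcher {
	if protocolVersion == protocolVersionGRPC {
		return &elbv2.Matcher{GrpcCode: aws.String(matcher)}
	}
	return &elbv2.Matcher{HttpCode: aws.String(matcher)}
}
```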
--- internal/service/elbv2/target_group.go | 58 +++++++++++++------------- 1 file changed, 28 insertions(+), 30 deletions(-) diff --git a/internal/service/elbv2/target_group.go b/internal/service/elbv2/target_group.go index df6b14cb5b1..8b7c08b0175 100644 --- a/internal/service/elbv2/target_group.go +++ b/internal/service/elbv2/target_group.go @@ -646,48 +646,46 @@ func resourceTargetGroupUpdate(ctx context.Context, d *schema.ResourceData, meta conn := meta.(*conns.AWSClient).ELBV2Conn(ctx) if d.HasChange("health_check") { - var params *elbv2.ModifyTargetGroupInput - healthChecks := d.Get("health_check").([]interface{}) - if len(healthChecks) == 1 { - healthCheck := healthChecks[0].(map[string]interface{}) + if v, ok := d.GetOk("health_check"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + tfMap := v.([]interface{})[0].(map[string]interface{}) - params = &elbv2.ModifyTargetGroupInput{ + input := &elbv2.ModifyTargetGroupInput{ + HealthCheckEnabled: aws.Bool(tfMap["enabled"].(bool)), + HealthCheckIntervalSeconds: aws.Int64(int64(tfMap["interval"].(int))), + HealthyThresholdCount: aws.Int64(int64(tfMap["healthy_threshold"].(int))), TargetGroupArn: aws.String(d.Id()), - HealthCheckEnabled: aws.Bool(healthCheck["enabled"].(bool)), - HealthCheckIntervalSeconds: aws.Int64(int64(healthCheck["interval"].(int))), - HealthyThresholdCount: aws.Int64(int64(healthCheck["healthy_threshold"].(int))), - UnhealthyThresholdCount: aws.Int64(int64(healthCheck["unhealthy_threshold"].(int))), + UnhealthyThresholdCount: aws.Int64(int64(tfMap["unhealthy_threshold"].(int))), } - t := healthCheck["timeout"].(int) - if t != 0 { - params.HealthCheckTimeoutSeconds = aws.Int64(int64(t)) + if v, ok := tfMap["timeout"].(int); ok && v != 0 { + input.HealthCheckTimeoutSeconds = aws.Int64(int64(v)) } - healthCheckProtocol := healthCheck["protocol"].(string) - protocolVersion := d.Get("protocol_version").(string) - if healthCheckProtocol != elbv2.ProtocolEnumTcp && !d.IsNewResource() { - if protocolVersion == "GRPC" { - params.Matcher = &elbv2.Matcher{ - GrpcCode: aws.String(healthCheck["matcher"].(string)), - } - } else { - params.Matcher = &elbv2.Matcher{ - HttpCode: aws.String(healthCheck["matcher"].(string)), + protocol := tfMap["protocol"].(string) + if protocol != elbv2.ProtocolEnumTcp { + if v, ok := tfMap["matcher"].(string); ok { + if protocolVersion := d.Get("protocol_version").(string); protocolVersion == protocolVersionGRPC { + input.Matcher = &elbv2.Matcher{ + GrpcCode: aws.String(v), + } + } else { + input.Matcher = &elbv2.Matcher{ + HttpCode: aws.String(v), + } } } - params.HealthCheckPath = aws.String(healthCheck["path"].(string)) + input.HealthCheckPath = aws.String(tfMap["path"].(string)) } - if d.Get("target_type").(string) != elbv2.TargetTypeEnumLambda { - params.HealthCheckPort = aws.String(healthCheck["port"].(string)) - params.HealthCheckProtocol = aws.String(healthCheckProtocol) + + if targetType := d.Get("target_type").(string); targetType != elbv2.TargetTypeEnumLambda { + input.HealthCheckPort = aws.String(tfMap["port"].(string)) + input.HealthCheckProtocol = aws.String(protocol) } - } - if params != nil { - _, err := conn.ModifyTargetGroupWithContext(ctx, params) + _, err := conn.ModifyTargetGroupWithContext(ctx, input) + if err != nil { - return sdkdiag.AppendErrorf(diags, "modifying Target Group: %s", err) + return sdkdiag.AppendErrorf(diags, "modifying ELBv2 Target Group (%s): %s", d.Id(), err) } } } From 399ec5d623675592ac4924d733c198d4a65f82e3 Mon Sep 17 00:00:00 2001 
From: changelogbot Date: Fri, 15 Dec 2023 01:09:37 +0000 Subject: [PATCH 085/123] Update CHANGELOG.md (Manual Trigger) --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9d1842bcdde..e355dbc5b1e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,6 +20,8 @@ ENHANCEMENTS: * resource/aws_db_instance: Add support for IBM Db2 databases ([#34834](https://github.com/hashicorp/terraform-provider-aws/issues/34834)) * resource/aws_dms_endpoint: Add `elasticsearch_settings.use_new_mapping_type` argument ([#29470](https://github.com/hashicorp/terraform-provider-aws/issues/29470)) * resource/aws_dms_endpoint: Add `postgres_settings` configuration block ([#34724](https://github.com/hashicorp/terraform-provider-aws/issues/34724)) +* resource/aws_finspace_kx_cluster: Add `database.dataview_name`, `scaling_group_configuration`, and `tickerplant_log_configuration` arguments. ([#34831](https://github.com/hashicorp/terraform-provider-aws/issues/34831)) +* resource/aws_finspace_kx_cluster: The `capacity_configuration` argument is now optional. ([#34831](https://github.com/hashicorp/terraform-provider-aws/issues/34831)) * resource/aws_lb: Add `connection_logs` configuration block ([#34864](https://github.com/hashicorp/terraform-provider-aws/issues/34864)) * resource/aws_lb: Add plan-time validation that exactly one of either `subnets` or `subnet_mapping` is configured ([#33205](https://github.com/hashicorp/terraform-provider-aws/issues/33205)) * resource/aws_lb: Allow the number of `subnet_mapping`s for Application Load Balancers to be changed without recreating the resource ([#33205](https://github.com/hashicorp/terraform-provider-aws/issues/33205)) From c4f1b8ce3a339d5545942db7605c9b9259105b6f Mon Sep 17 00:00:00 2001 From: changelogbot Date: Fri, 15 Dec 2023 02:17:52 +0000 Subject: [PATCH 086/123] Update CHANGELOG.md after v5.31.0 --- CHANGELOG.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e355dbc5b1e..8a04d9dd4c9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,5 @@ -## 5.31.0 (Unreleased) +## 5.32.0 (Unreleased) +## 5.31.0 (December 15, 2023) FEATURES: From 7cfb2cc4e906cba10f60047b8ea048a3ded65df2 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 15 Dec 2023 09:00:56 -0500 Subject: [PATCH 087/123] r/aws_lb_target_group: Add 'expandTargetGroupStickinessAttributes'. 
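Correct the stickiness attribute constants (`stickiness.enabled`, `stickiness.type`) and add an `expandTargetGroupStickinessAttributes` helper. The helper's body is not included in this excerpt, so the following is only a sketch of the shape such an expander could take, built from the attribute-name and stickiness-type constants added earlier in the series; it uses plain `strconv` conversions, whereas the real code may rely on the provider's `flex` helpers, and the actual implementation may differ:

```go
// Sketch: map the stickiness configuration block to target group attribute
// entries keyed by the constants in const.go. Illustrative only.
func expandTargetGroupStickinessAttributes(tfMap map[string]interface{}) []*elbv2.TargetGroupAttribute {
	if tfMap == nil {
		return nil
	}

	apiObjects := []*elbv2.TargetGroupAttribute{
		{
			Key:   aws.String(targetGroupAttributeStickinessEnabled),
			Value: aws.String(strconv.FormatBool(tfMap["enabled"].(bool))),
		},
		{
			Key:   aws.String(targetGroupAttributeStickinessType),
			Value: aws.String(tfMap["type"].(string)),
		},
	}

	switch tfMap["type"].(string) {
	case stickinessTypeLBCookie:
		apiObjects = append(apiObjects, &elbv2.TargetGroupAttribute{
			Key:   aws.String(targetGroupAttributeStickinessLBCookieDurationSeconds),
			Value: aws.String(strconv.Itoa(tfMap["cookie_duration"].(int))),
		})
	case stickinessTypeAppCookie:
		apiObjects = append(apiObjects,
			&elbv2.TargetGroupAttribute{
				Key:   aws.String(targetGroupAttributeStickinessAppCookieCookieName),
				Value: aws.String(tfMap["cookie_name"].(string)),
			},
			&elbv2.TargetGroupAttribute{
				Key:   aws.String(targetGroupAttributeStickinessAppCookieDurationSeconds),
				Value: aws.String(strconv.Itoa(tfMap["cookie_duration"].(int))),
			},
		)
	}

	return apiObjects
}
```

Keeping the app-cookie name and duration together in one expander is what allows the cookie name to survive a switch between `lb_cookie` and `app_cookie` stickiness, which is the behavior the earlier acceptance test in this series exercises.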
--- internal/service/elbv2/const.go | 4 +- internal/service/elbv2/target_group.go | 242 +++++++++++-------------- 2 files changed, 110 insertions(+), 136 deletions(-) diff --git a/internal/service/elbv2/const.go b/internal/service/elbv2/const.go index 87ca3b2676e..d4ce150cf1c 100644 --- a/internal/service/elbv2/const.go +++ b/internal/service/elbv2/const.go @@ -95,8 +95,8 @@ func httpXFFHeaderProcessingMode_Values() []string { const ( // The following attributes are supported by all load balancers: targetGroupAttributeDeregistrationDelayTimeoutSeconds = "deregistration_delay.timeout_seconds" - targetGroupAttributeDeregistrationStickinessEnabled = "stickiness.enabled" - targetGroupAttributeDeregistrationStickinessType = "stickiness.enabled" + targetGroupAttributeStickinessEnabled = "stickiness.enabled" + targetGroupAttributeStickinessType = "stickiness.type" // The following attributes are supported by Application Load Balancers and Network Load Balancers: targetGroupAttributeLoadBalancingCrossZoneEnabled = "load_balancing.cross_zone.enabled" diff --git a/internal/service/elbv2/target_group.go b/internal/service/elbv2/target_group.go index 8b7c08b0175..9ee89eb7a04 100644 --- a/internal/service/elbv2/target_group.go +++ b/internal/service/elbv2/target_group.go @@ -25,6 +25,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/types/nullable" @@ -358,15 +359,16 @@ func resourceTargetGroupCreate(ctx context.Context, d *schema.ResourceData, meta runtimeValidations(d, &diags) + protocol := d.Get("protocol").(string) + targetType := d.Get("target_type").(string) input := &elbv2.CreateTargetGroupInput{ Name: aws.String(name), Tags: getTagsIn(ctx), - TargetType: aws.String(d.Get("target_type").(string)), + TargetType: aws.String(targetType), } - if targetType := d.Get("target_type").(string); targetType != elbv2.TargetTypeEnumLambda { + if targetType != elbv2.TargetTypeEnumLambda { input.Port = aws.Int64(int64(d.Get("port").(int))) - protocol := d.Get("protocol").(string) input.Protocol = aws.String(protocol) switch protocol { case elbv2.ProtocolEnumHttp, elbv2.ProtocolEnumHttps: @@ -393,8 +395,8 @@ func resourceTargetGroupCreate(ctx context.Context, d *schema.ResourceData, meta input.HealthCheckTimeoutSeconds = aws.Int64(int64(v)) } - protocol := tfMap["protocol"].(string) - if protocol != elbv2.ProtocolEnumTcp { + healthCheckProtocol := tfMap["protocol"].(string) + if healthCheckProtocol != elbv2.ProtocolEnumTcp { if v, ok := tfMap["path"].(string); ok && v != "" { input.HealthCheckPath = aws.String(v) } @@ -412,9 +414,9 @@ func resourceTargetGroupCreate(ctx context.Context, d *schema.ResourceData, meta } } - if targetType := d.Get("target_type").(string); targetType != elbv2.TargetTypeEnumLambda { + if targetType != elbv2.TargetTypeEnumLambda { input.HealthCheckPort = aws.String(tfMap["port"].(string)) - input.HealthCheckProtocol = aws.String(protocol) + input.HealthCheckProtocol = aws.String(healthCheckProtocol) } } @@ -449,65 +451,69 @@ func resourceTargetGroupCreate(ctx context.Context, d *schema.ResourceData, meta return sdkdiag.AppendErrorf(diags, "waiting for ELBv2 Target Group 
(%s) create: %s", d.Id(), err) } - var attrs []*elbv2.TargetGroupAttribute + var attributes []*elbv2.TargetGroupAttribute - switch d.Get("target_type").(string) { + switch targetType { case elbv2.TargetTypeEnumInstance, elbv2.TargetTypeEnumIp: + if v, ok := d.GetOk("stickiness"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + attributes = append(attributes, expandTargetGroupStickinessAttributes(v.([]interface{})[0].(map[string]interface{}), protocol)...) + } + if v, null, _ := nullable.Int(d.Get("deregistration_delay").(string)).Value(); !null { - attrs = append(attrs, &elbv2.TargetGroupAttribute{ + attributes = append(attributes, &elbv2.TargetGroupAttribute{ Key: aws.String("deregistration_delay.timeout_seconds"), Value: aws.String(fmt.Sprintf("%d", v)), }) } if v, ok := d.GetOk("load_balancing_algorithm_type"); ok { - attrs = append(attrs, &elbv2.TargetGroupAttribute{ + attributes = append(attributes, &elbv2.TargetGroupAttribute{ Key: aws.String("load_balancing.algorithm.type"), Value: aws.String(v.(string)), }) } if v, ok := d.GetOk("load_balancing_cross_zone_enabled"); ok { - attrs = append(attrs, &elbv2.TargetGroupAttribute{ + attributes = append(attributes, &elbv2.TargetGroupAttribute{ Key: aws.String("load_balancing.cross_zone.enabled"), Value: aws.String(v.(string)), }) } if v, ok := d.GetOk("preserve_client_ip"); ok { - attrs = append(attrs, &elbv2.TargetGroupAttribute{ + attributes = append(attributes, &elbv2.TargetGroupAttribute{ Key: aws.String("preserve_client_ip.enabled"), Value: aws.String(v.(string)), }) } if v, ok := d.GetOk("proxy_protocol_v2"); ok { - attrs = append(attrs, &elbv2.TargetGroupAttribute{ + attributes = append(attributes, &elbv2.TargetGroupAttribute{ Key: aws.String("proxy_protocol_v2.enabled"), Value: aws.String(strconv.FormatBool(v.(bool))), }) } if v, ok := d.GetOk("connection_termination"); ok { - attrs = append(attrs, &elbv2.TargetGroupAttribute{ + attributes = append(attributes, &elbv2.TargetGroupAttribute{ Key: aws.String("deregistration_delay.connection_termination.enabled"), Value: aws.String(strconv.FormatBool(v.(bool))), }) } if v, ok := d.GetOk("slow_start"); ok { - attrs = append(attrs, &elbv2.TargetGroupAttribute{ + attributes = append(attributes, &elbv2.TargetGroupAttribute{ Key: aws.String("slow_start.duration_seconds"), Value: aws.String(fmt.Sprintf("%d", v.(int))), }) } // Only supported for GWLB - if v, ok := d.Get("protocol").(string); ok && v == elbv2.ProtocolEnumGeneve { + if protocol == elbv2.ProtocolEnumGeneve { if v, ok := d.GetOk("target_failover"); ok { failoverBlock := v.([]interface{}) failover := failoverBlock[0].(map[string]interface{}) - attrs = append(attrs, + attributes = append(attributes, &elbv2.TargetGroupAttribute{ Key: aws.String("target_failover.on_deregistration"), Value: aws.String(failover["on_deregistration"].(string)), @@ -526,7 +532,7 @@ func resourceTargetGroupCreate(ctx context.Context, d *schema.ResourceData, meta if v, ok := d.GetOk("target_health_state"); ok && len(v.([]interface{})) > 0 { targetHealthStateBlock := v.([]interface{}) targetHealthState := targetHealthStateBlock[0].(map[string]interface{}) - attrs = append(attrs, + attributes = append(attributes, &elbv2.TargetGroupAttribute{ Key: aws.String("target_health_state.unhealthy.connection_termination.enabled"), Value: aws.String(strconv.FormatBool(targetHealthState["enable_unhealthy_connection_termination"].(bool))), @@ -535,64 +541,25 @@ func resourceTargetGroupCreate(ctx context.Context, d *schema.ResourceData, meta } } } - - if 
v, ok := d.GetOk("stickiness"); ok && len(v.([]interface{})) > 0 { - stickinessBlocks := v.([]interface{}) - stickiness := stickinessBlocks[0].(map[string]interface{}) - - attrs = append(attrs, - &elbv2.TargetGroupAttribute{ - Key: aws.String("stickiness.enabled"), - Value: aws.String(strconv.FormatBool(stickiness["enabled"].(bool))), - }, - &elbv2.TargetGroupAttribute{ - Key: aws.String("stickiness.type"), - Value: aws.String(stickiness["type"].(string)), - }) - - switch d.Get("protocol").(string) { - case elbv2.ProtocolEnumHttp, elbv2.ProtocolEnumHttps: - switch stickiness["type"].(string) { - case "lb_cookie": - attrs = append(attrs, - &elbv2.TargetGroupAttribute{ - Key: aws.String("stickiness.lb_cookie.duration_seconds"), - Value: aws.String(fmt.Sprintf("%d", stickiness["cookie_duration"].(int))), - }) - case "app_cookie": - attrs = append(attrs, - &elbv2.TargetGroupAttribute{ - Key: aws.String("stickiness.app_cookie.duration_seconds"), - Value: aws.String(fmt.Sprintf("%d", stickiness["cookie_duration"].(int))), - }, - &elbv2.TargetGroupAttribute{ - Key: aws.String("stickiness.app_cookie.cookie_name"), - Value: aws.String(stickiness["cookie_name"].(string)), - }) - default: - log.Printf("[WARN] Unexpected stickiness type. Expected lb_cookie or app_cookie, got %s", stickiness["type"].(string)) - } - } - } case elbv2.TargetTypeEnumLambda: if v, ok := d.GetOk("lambda_multi_value_headers_enabled"); ok { - attrs = append(attrs, &elbv2.TargetGroupAttribute{ + attributes = append(attributes, &elbv2.TargetGroupAttribute{ Key: aws.String("lambda.multi_value_headers.enabled"), Value: aws.String(strconv.FormatBool(v.(bool))), }) } } - if len(attrs) > 0 { - params := &elbv2.ModifyTargetGroupAttributesInput{ + if len(attributes) > 0 { + input := &elbv2.ModifyTargetGroupAttributesInput{ + Attributes: attributes, TargetGroupArn: aws.String(d.Id()), - Attributes: attrs, } - _, err := conn.ModifyTargetGroupAttributesWithContext(ctx, params) + _, err := conn.ModifyTargetGroupAttributesWithContext(ctx, input) if err != nil { - return sdkdiag.AppendErrorf(diags, "modifying Target Group Attributes: %s", err) + return sdkdiag.AppendErrorf(diags, "modifying ELBv2 Target Group (%s) attributes: %s", d.Id(), err) } } @@ -645,6 +612,9 @@ func resourceTargetGroupUpdate(ctx context.Context, d *schema.ResourceData, meta var diags diag.Diagnostics conn := meta.(*conns.AWSClient).ELBV2Conn(ctx) + protocol := d.Get("protocol").(string) + targetType := d.Get("target_type").(string) + if d.HasChange("health_check") { if v, ok := d.GetOk("health_check"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { tfMap := v.([]interface{})[0].(map[string]interface{}) @@ -661,8 +631,8 @@ func resourceTargetGroupUpdate(ctx context.Context, d *schema.ResourceData, meta input.HealthCheckTimeoutSeconds = aws.Int64(int64(v)) } - protocol := tfMap["protocol"].(string) - if protocol != elbv2.ProtocolEnumTcp { + healthCheckProtocol := tfMap["protocol"].(string) + if healthCheckProtocol != elbv2.ProtocolEnumTcp { if v, ok := tfMap["matcher"].(string); ok { if protocolVersion := d.Get("protocol_version").(string); protocolVersion == protocolVersionGRPC { input.Matcher = &elbv2.Matcher{ @@ -677,9 +647,9 @@ func resourceTargetGroupUpdate(ctx context.Context, d *schema.ResourceData, meta input.HealthCheckPath = aws.String(tfMap["path"].(string)) } - if targetType := d.Get("target_type").(string); targetType != elbv2.TargetTypeEnumLambda { + if targetType != elbv2.TargetTypeEnumLambda { input.HealthCheckPort = 
aws.String(tfMap["port"].(string)) - input.HealthCheckProtocol = aws.String(protocol) + input.HealthCheckProtocol = aws.String(healthCheckProtocol) } _, err := conn.ModifyTargetGroupWithContext(ctx, input) @@ -690,13 +660,24 @@ func resourceTargetGroupUpdate(ctx context.Context, d *schema.ResourceData, meta } } - var attrs []*elbv2.TargetGroupAttribute + var attributes []*elbv2.TargetGroupAttribute - switch d.Get("target_type").(string) { + switch targetType { case elbv2.TargetTypeEnumInstance, elbv2.TargetTypeEnumIp: + if d.HasChange("stickiness") { + if v, ok := d.GetOk("stickiness"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + attributes = append(attributes, expandTargetGroupStickinessAttributes(v.([]interface{})[0].(map[string]interface{}), protocol)...) + } else { + attributes = append(attributes, &elbv2.TargetGroupAttribute{ + Key: aws.String(targetGroupAttributeStickinessEnabled), + Value: flex.BoolValueToString(false), + }) + } + } + if d.HasChange("deregistration_delay") { if v, null, _ := nullable.Int(d.Get("deregistration_delay").(string)).Value(); !null { - attrs = append(attrs, &elbv2.TargetGroupAttribute{ + attributes = append(attributes, &elbv2.TargetGroupAttribute{ Key: aws.String("deregistration_delay.timeout_seconds"), Value: aws.String(fmt.Sprintf("%d", v)), }) @@ -704,91 +685,42 @@ func resourceTargetGroupUpdate(ctx context.Context, d *schema.ResourceData, meta } if d.HasChange("slow_start") { - attrs = append(attrs, &elbv2.TargetGroupAttribute{ + attributes = append(attributes, &elbv2.TargetGroupAttribute{ Key: aws.String("slow_start.duration_seconds"), Value: aws.String(fmt.Sprintf("%d", d.Get("slow_start").(int))), }) } if d.HasChange("proxy_protocol_v2") { - attrs = append(attrs, &elbv2.TargetGroupAttribute{ + attributes = append(attributes, &elbv2.TargetGroupAttribute{ Key: aws.String("proxy_protocol_v2.enabled"), Value: aws.String(strconv.FormatBool(d.Get("proxy_protocol_v2").(bool))), }) } if d.HasChange("connection_termination") { - attrs = append(attrs, &elbv2.TargetGroupAttribute{ + attributes = append(attributes, &elbv2.TargetGroupAttribute{ Key: aws.String("deregistration_delay.connection_termination.enabled"), Value: aws.String(strconv.FormatBool(d.Get("connection_termination").(bool))), }) } if d.HasChange("preserve_client_ip") { - attrs = append(attrs, &elbv2.TargetGroupAttribute{ + attributes = append(attributes, &elbv2.TargetGroupAttribute{ Key: aws.String("preserve_client_ip.enabled"), Value: aws.String(d.Get("preserve_client_ip").(string)), }) } - if d.HasChange("stickiness") { - stickinessBlocks := d.Get("stickiness").([]interface{}) - if len(stickinessBlocks) == 1 { - stickiness := stickinessBlocks[0].(map[string]interface{}) - attrs = append(attrs, - &elbv2.TargetGroupAttribute{ - Key: aws.String("stickiness.enabled"), - Value: aws.String(strconv.FormatBool(stickiness["enabled"].(bool))), - }, - &elbv2.TargetGroupAttribute{ - Key: aws.String("stickiness.type"), - Value: aws.String(stickiness["type"].(string)), - }) - - switch d.Get("protocol").(string) { - case elbv2.ProtocolEnumHttp, elbv2.ProtocolEnumHttps: - switch stickiness["type"].(string) { - case "lb_cookie": - attrs = append(attrs, - &elbv2.TargetGroupAttribute{ - Key: aws.String("stickiness.lb_cookie.duration_seconds"), - Value: aws.String(fmt.Sprintf("%d", stickiness["cookie_duration"].(int))), - }, - &elbv2.TargetGroupAttribute{ - Key: aws.String("stickiness.app_cookie.cookie_name"), - Value: aws.String(stickiness["cookie_name"].(string)), - }) - case 
"app_cookie": - attrs = append(attrs, - &elbv2.TargetGroupAttribute{ - Key: aws.String("stickiness.app_cookie.duration_seconds"), - Value: aws.String(fmt.Sprintf("%d", stickiness["cookie_duration"].(int))), - }, - &elbv2.TargetGroupAttribute{ - Key: aws.String("stickiness.app_cookie.cookie_name"), - Value: aws.String(stickiness["cookie_name"].(string)), - }) - default: - log.Printf("[WARN] Unexpected stickiness type. Expected lb_cookie or app_cookie, got %s", stickiness["type"].(string)) - } - } - } else if len(stickinessBlocks) == 0 { - attrs = append(attrs, &elbv2.TargetGroupAttribute{ - Key: aws.String("stickiness.enabled"), - Value: aws.String("false"), - }) - } - } - if d.HasChange("load_balancing_algorithm_type") { - attrs = append(attrs, &elbv2.TargetGroupAttribute{ + attributes = append(attributes, &elbv2.TargetGroupAttribute{ Key: aws.String("load_balancing.algorithm.type"), Value: aws.String(d.Get("load_balancing_algorithm_type").(string)), }) } if d.HasChange("load_balancing_cross_zone_enabled") { - attrs = append(attrs, &elbv2.TargetGroupAttribute{ + attributes = append(attributes, &elbv2.TargetGroupAttribute{ Key: aws.String("load_balancing.cross_zone.enabled"), Value: aws.String(d.Get("load_balancing_cross_zone_enabled").(string)), }) @@ -798,7 +730,7 @@ func resourceTargetGroupUpdate(ctx context.Context, d *schema.ResourceData, meta targetHealthStateBlock := d.Get("target_health_state").([]interface{}) if len(targetHealthStateBlock) == 1 { targetHealthState := targetHealthStateBlock[0].(map[string]interface{}) - attrs = append(attrs, + attributes = append(attributes, &elbv2.TargetGroupAttribute{ Key: aws.String("target_health_state.unhealthy.connection_termination.enabled"), Value: aws.String(strconv.FormatBool(targetHealthState["enable_unhealthy_connection_termination"].(bool))), @@ -810,7 +742,7 @@ func resourceTargetGroupUpdate(ctx context.Context, d *schema.ResourceData, meta failoverBlock := d.Get("target_failover").([]interface{}) if len(failoverBlock) == 1 { failover := failoverBlock[0].(map[string]interface{}) - attrs = append(attrs, + attributes = append(attributes, &elbv2.TargetGroupAttribute{ Key: aws.String("target_failover.on_deregistration"), Value: aws.String(failover["on_deregistration"].(string)), @@ -825,22 +757,23 @@ func resourceTargetGroupUpdate(ctx context.Context, d *schema.ResourceData, meta case elbv2.TargetTypeEnumLambda: if d.HasChange("lambda_multi_value_headers_enabled") { - attrs = append(attrs, &elbv2.TargetGroupAttribute{ + attributes = append(attributes, &elbv2.TargetGroupAttribute{ Key: aws.String("lambda.multi_value_headers.enabled"), Value: aws.String(strconv.FormatBool(d.Get("lambda_multi_value_headers_enabled").(bool))), }) } } - if len(attrs) > 0 { - params := &elbv2.ModifyTargetGroupAttributesInput{ + if len(attributes) > 0 { + input := &elbv2.ModifyTargetGroupAttributesInput{ + Attributes: attributes, TargetGroupArn: aws.String(d.Id()), - Attributes: attrs, } - _, err := conn.ModifyTargetGroupAttributesWithContext(ctx, params) + _, err := conn.ModifyTargetGroupAttributesWithContext(ctx, input) + if err != nil { - return sdkdiag.AppendErrorf(diags, "modifying Target Group Attributes: %s", err) + return sdkdiag.AppendErrorf(diags, "modifying ELBv2 Target Group (%s) attributes: %s", d.Id(), err) } } @@ -1003,6 +936,47 @@ func TargetGroupSuffixFromARN(arn *string) string { return "" } +func expandTargetGroupStickinessAttributes(tfMap map[string]interface{}, protocol string) []*elbv2.TargetGroupAttribute { + if tfMap == nil { + return nil 
+ } + + apiObjects := []*elbv2.TargetGroupAttribute{ + { + Key: aws.String(targetGroupAttributeStickinessEnabled), + Value: flex.BoolValueToString(tfMap["enabled"].(bool)), + }, + { + Key: aws.String(targetGroupAttributeStickinessType), + Value: aws.String(tfMap["type"].(string)), + }, + } + + switch protocol { + case elbv2.ProtocolEnumHttp, elbv2.ProtocolEnumHttps: + switch stickinessType := tfMap["type"].(string); stickinessType { + case stickinessTypeLBCookie: + apiObjects = append(apiObjects, + &elbv2.TargetGroupAttribute{ + Key: aws.String(targetGroupAttributeStickinessLBCookieDurationSeconds), + Value: flex.IntValueToString(tfMap["cookie_duration"].(int)), + }) + case stickinessTypeAppCookie: + apiObjects = append(apiObjects, + &elbv2.TargetGroupAttribute{ + Key: aws.String(targetGroupAttributeStickinessAppCookieCookieName), + Value: aws.String(tfMap["cookie_name"].(string)), + }, + &elbv2.TargetGroupAttribute{ + Key: aws.String(targetGroupAttributeStickinessAppCookieDurationSeconds), + Value: flex.IntValueToString(tfMap["cookie_duration"].(int)), + }) + } + } + + return apiObjects +} + // flattenTargetGroupResource takes a *elbv2.TargetGroup and populates all respective resource fields. func flattenTargetGroupResource(ctx context.Context, d *schema.ResourceData, meta interface{}, targetGroup *elbv2.TargetGroup) error { conn := meta.(*conns.AWSClient).ELBV2Conn(ctx) From e23d2f2bee92cf3520c9490b87590ef0e20ce77f Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 15 Dec 2023 10:24:27 -0500 Subject: [PATCH 088/123] r/aws_lb_target_group: Tidy up Read. --- internal/service/elbv2/target_group.go | 242 +++++++++++++------------ 1 file changed, 127 insertions(+), 115 deletions(-) diff --git a/internal/service/elbv2/target_group.go b/internal/service/elbv2/target_group.go index 9ee89eb7a04..2a95c61a3a8 100644 --- a/internal/service/elbv2/target_group.go +++ b/internal/service/elbv2/target_group.go @@ -584,9 +584,7 @@ func resourceTargetGroupRead(ctx context.Context, d *schema.ResourceData, meta i var diags diag.Diagnostics conn := meta.(*conns.AWSClient).ELBV2Conn(ctx) - outputRaw, err := tfresource.RetryWhenNewResourceNotFound(ctx, propagationTimeout, func() (interface{}, error) { - return FindTargetGroupByARN(ctx, conn, d.Id()) - }, d.IsNewResource()) + targetGroup, err := FindTargetGroupByARN(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] ELBv2 Target Group %s not found, removing from state", d.Id()) @@ -602,9 +600,108 @@ func resourceTargetGroupRead(ctx context.Context, d *schema.ResourceData, meta i runtimeValidations(d, &diags) } - if err := flattenTargetGroupResource(ctx, d, meta, outputRaw.(*elbv2.TargetGroup)); err != nil { - return sdkdiag.AppendFromErr(diags, err) + targetType := aws.StringValue(targetGroup.TargetType) + + d.Set("arn", targetGroup.TargetGroupArn) + d.Set("arn_suffix", TargetGroupSuffixFromARN(targetGroup.TargetGroupArn)) + d.Set("ip_address_type", targetGroup.IpAddressType) + d.Set("name", targetGroup.TargetGroupName) + d.Set("name_prefix", create.NamePrefixFromName(aws.StringValue(targetGroup.TargetGroupName))) + d.Set("target_type", targetType) + + if err := d.Set("health_check", flattenLbTargetGroupHealthCheck(targetGroup)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting health_check: %s", err) + } + + if _, ok := d.GetOk("port"); targetGroup.Port != nil || ok { + d.Set("port", targetGroup.Port) + } + if _, ok := d.GetOk("protocol"); targetGroup.Protocol != nil || ok { + d.Set("protocol", 
targetGroup.Protocol) + } + if _, ok := d.GetOk("protocol_version"); targetGroup.ProtocolVersion != nil || ok { + d.Set("protocol_version", targetGroup.ProtocolVersion) + } + if _, ok := d.GetOk("vpc_id"); targetGroup.VpcId != nil || ok { + d.Set("vpc_id", targetGroup.VpcId) + } + + attributes, err := findTargetGroupAttributesByARN(ctx, conn, d.Id()) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading ELBv2 Target Group (%s) attributes: %s", d.Id(), err) + } + + for _, attr := range attributes { + switch aws.StringValue(attr.Key) { + case "deregistration_delay.timeout_seconds": + d.Set("deregistration_delay", attr.Value) + case "lambda.multi_value_headers.enabled": + enabled, err := strconv.ParseBool(aws.StringValue(attr.Value)) + if err != nil { + return sdkdiag.AppendErrorf(diags, "converting lambda.multi_value_headers.enabled to bool: %s", aws.StringValue(attr.Value)) + } + d.Set("lambda_multi_value_headers_enabled", enabled) + case "proxy_protocol_v2.enabled": + enabled, err := strconv.ParseBool(aws.StringValue(attr.Value)) + if err != nil { + return sdkdiag.AppendErrorf(diags, "converting proxy_protocol_v2.enabled to bool: %s", aws.StringValue(attr.Value)) + } + d.Set("proxy_protocol_v2", enabled) + case "deregistration_delay.connection_termination.enabled": + enabled, err := strconv.ParseBool(aws.StringValue(attr.Value)) + if err != nil { + return sdkdiag.AppendErrorf(diags, "converting deregistration_delay.connection_termination.enabled to bool: %s", aws.StringValue(attr.Value)) + } + d.Set("connection_termination", enabled) + case "slow_start.duration_seconds": + slowStart, err := strconv.Atoi(aws.StringValue(attr.Value)) + if err != nil { + return sdkdiag.AppendErrorf(diags, "converting slow_start.duration_seconds to int: %s", aws.StringValue(attr.Value)) + } + d.Set("slow_start", slowStart) + case "load_balancing.algorithm.type": + loadBalancingAlgorithm := aws.StringValue(attr.Value) + d.Set("load_balancing_algorithm_type", loadBalancingAlgorithm) + case "load_balancing.cross_zone.enabled": + loadBalancingCrossZoneEnabled := aws.StringValue(attr.Value) + d.Set("load_balancing_cross_zone_enabled", loadBalancingCrossZoneEnabled) + case "preserve_client_ip.enabled": + _, err := strconv.ParseBool(aws.StringValue(attr.Value)) + if err != nil { + return sdkdiag.AppendErrorf(diags, "converting preserve_client_ip.enabled to bool: %s", aws.StringValue(attr.Value)) + } + d.Set("preserve_client_ip", attr.Value) + } + } + + stickinessAttr, err := flattenTargetGroupStickiness(attributes) + if err != nil { + return sdkdiag.AppendErrorf(diags, "flattening stickiness: %s", err) + } + + if err := d.Set("stickiness", stickinessAttr); err != nil { + return sdkdiag.AppendErrorf(diags, "setting stickiness: %s", err) + } + + targetHealthStateAttr, err := flattenTargetHealthState(attributes) + if err != nil { + return sdkdiag.AppendErrorf(diags, "flattening target health state: %s", err) + } + if err := d.Set("target_health_state", targetHealthStateAttr); err != nil { + return sdkdiag.AppendErrorf(diags, "setting target health state: %s", err) + } + + // Set target failover attributes for GWLB + targetFailoverAttr := flattenTargetGroupFailover(attributes) + if err != nil { + return sdkdiag.AppendErrorf(diags, "flattening target failover: %s", err) + } + + if err := d.Set("target_failover", targetFailoverAttr); err != nil { + return sdkdiag.AppendErrorf(diags, "setting target failover: %s", err) } + return diags } @@ -881,6 +978,31 @@ func findTargetGroups(ctx context.Context, conn 
*elbv2.ELBV2, input *elbv2.Descr return output, nil } +func findTargetGroupAttributesByARN(ctx context.Context, conn *elbv2.ELBV2, arn string) ([]*elbv2.TargetGroupAttribute, error) { + input := &elbv2.DescribeTargetGroupAttributesInput{ + TargetGroupArn: aws.String(arn), + } + + output, err := conn.DescribeTargetGroupAttributesWithContext(ctx, input) + + if tfawserr.ErrCodeEquals(err, elbv2.ErrCodeTargetGroupNotFoundException) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output.Attributes, nil +} + func validTargetGroupHealthCheckPath(v interface{}, k string) (ws []string, errors []error) { value := v.(string) if !strings.HasPrefix(value, "/") { @@ -977,116 +1099,6 @@ func expandTargetGroupStickinessAttributes(tfMap map[string]interface{}, protoco return apiObjects } -// flattenTargetGroupResource takes a *elbv2.TargetGroup and populates all respective resource fields. -func flattenTargetGroupResource(ctx context.Context, d *schema.ResourceData, meta interface{}, targetGroup *elbv2.TargetGroup) error { - conn := meta.(*conns.AWSClient).ELBV2Conn(ctx) - - targetType := aws.StringValue(targetGroup.TargetType) - - d.Set("arn", targetGroup.TargetGroupArn) - d.Set("arn_suffix", TargetGroupSuffixFromARN(targetGroup.TargetGroupArn)) - d.Set("ip_address_type", targetGroup.IpAddressType) - d.Set("name", targetGroup.TargetGroupName) - d.Set("name_prefix", create.NamePrefixFromName(aws.StringValue(targetGroup.TargetGroupName))) - d.Set("target_type", targetType) - - if err := d.Set("health_check", flattenLbTargetGroupHealthCheck(targetGroup)); err != nil { - return fmt.Errorf("setting health_check: %w", err) - } - - if _, ok := d.GetOk("port"); targetGroup.Port != nil || ok { - d.Set("port", targetGroup.Port) - } - if _, ok := d.GetOk("protocol"); targetGroup.Protocol != nil || ok { - d.Set("protocol", targetGroup.Protocol) - } - if _, ok := d.GetOk("protocol_version"); targetGroup.ProtocolVersion != nil || ok { - d.Set("protocol_version", targetGroup.ProtocolVersion) - } - if _, ok := d.GetOk("vpc_id"); targetGroup.VpcId != nil || ok { - d.Set("vpc_id", targetGroup.VpcId) - } - - attrResp, err := conn.DescribeTargetGroupAttributesWithContext(ctx, &elbv2.DescribeTargetGroupAttributesInput{ - TargetGroupArn: aws.String(d.Id()), - }) - if err != nil { - return fmt.Errorf("retrieving Target Group Attributes: %w", err) - } - - for _, attr := range attrResp.Attributes { - switch aws.StringValue(attr.Key) { - case "deregistration_delay.timeout_seconds": - d.Set("deregistration_delay", attr.Value) - case "lambda.multi_value_headers.enabled": - enabled, err := strconv.ParseBool(aws.StringValue(attr.Value)) - if err != nil { - return fmt.Errorf("converting lambda.multi_value_headers.enabled to bool: %s", aws.StringValue(attr.Value)) - } - d.Set("lambda_multi_value_headers_enabled", enabled) - case "proxy_protocol_v2.enabled": - enabled, err := strconv.ParseBool(aws.StringValue(attr.Value)) - if err != nil { - return fmt.Errorf("converting proxy_protocol_v2.enabled to bool: %s", aws.StringValue(attr.Value)) - } - d.Set("proxy_protocol_v2", enabled) - case "deregistration_delay.connection_termination.enabled": - enabled, err := strconv.ParseBool(aws.StringValue(attr.Value)) - if err != nil { - return fmt.Errorf("converting deregistration_delay.connection_termination.enabled to bool: %s", aws.StringValue(attr.Value)) - } - 
d.Set("connection_termination", enabled) - case "slow_start.duration_seconds": - slowStart, err := strconv.Atoi(aws.StringValue(attr.Value)) - if err != nil { - return fmt.Errorf("converting slow_start.duration_seconds to int: %s", aws.StringValue(attr.Value)) - } - d.Set("slow_start", slowStart) - case "load_balancing.algorithm.type": - loadBalancingAlgorithm := aws.StringValue(attr.Value) - d.Set("load_balancing_algorithm_type", loadBalancingAlgorithm) - case "load_balancing.cross_zone.enabled": - loadBalancingCrossZoneEnabled := aws.StringValue(attr.Value) - d.Set("load_balancing_cross_zone_enabled", loadBalancingCrossZoneEnabled) - case "preserve_client_ip.enabled": - _, err := strconv.ParseBool(aws.StringValue(attr.Value)) - if err != nil { - return fmt.Errorf("converting preserve_client_ip.enabled to bool: %s", aws.StringValue(attr.Value)) - } - d.Set("preserve_client_ip", attr.Value) - } - } - - stickinessAttr, err := flattenTargetGroupStickiness(attrResp.Attributes) - if err != nil { - return fmt.Errorf("flattening stickiness: %w", err) - } - - if err := d.Set("stickiness", stickinessAttr); err != nil { - return fmt.Errorf("setting stickiness: %w", err) - } - - targetHealthStateAttr, err := flattenTargetHealthState(attrResp.Attributes) - if err != nil { - return fmt.Errorf("flattening target health state: %w", err) - } - if err := d.Set("target_health_state", targetHealthStateAttr); err != nil { - return fmt.Errorf("setting target health state: %w", err) - } - - // Set target failover attributes for GWLB - targetFailoverAttr := flattenTargetGroupFailover(attrResp.Attributes) - if err != nil { - return fmt.Errorf("flattening target failover: %w", err) - } - - if err := d.Set("target_failover", targetFailoverAttr); err != nil { - return fmt.Errorf("setting target failover: %w", err) - } - - return nil -} - func flattenTargetHealthState(attributes []*elbv2.TargetGroupAttribute) ([]interface{}, error) { if len(attributes) == 0 { return []interface{}{}, nil From 5f091f8df7d7159083f4536158270c42361068bf Mon Sep 17 00:00:00 2001 From: Adrian Johnson Date: Fri, 15 Dec 2023 09:33:26 -0600 Subject: [PATCH 089/123] chore: tweak doc fmt --- website/docs/d/ecr_image.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/d/ecr_image.html.markdown b/website/docs/d/ecr_image.html.markdown index 92886386cce..398af3f40cd 100644 --- a/website/docs/d/ecr_image.html.markdown +++ b/website/docs/d/ecr_image.html.markdown @@ -3,7 +3,7 @@ subcategory: "ECR (Elastic Container Registry)" layout: "aws" page_title: "AWS: aws_ecr_image" description: |- - Provides details about an ECR Image + Provides details about an ECR Image --- # Data Source: aws_ecr_image From c3513572b1a02bc5899bf3e100f31e115420251a Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 15 Dec 2023 10:50:08 -0500 Subject: [PATCH 090/123] r/aws_lb_target_group: Tidy up 'flattenTargetGroupStickinessAttributes'. 
--- internal/service/elbv2/target_group.go | 91 +++++++++---------- .../service/elbv2/target_group_data_source.go | 24 ++--- 2 files changed, 51 insertions(+), 64 deletions(-) diff --git a/internal/service/elbv2/target_group.go b/internal/service/elbv2/target_group.go index 2a95c61a3a8..a0a8d0ce229 100644 --- a/internal/service/elbv2/target_group.go +++ b/internal/service/elbv2/target_group.go @@ -600,13 +600,12 @@ func resourceTargetGroupRead(ctx context.Context, d *schema.ResourceData, meta i runtimeValidations(d, &diags) } - targetType := aws.StringValue(targetGroup.TargetType) - d.Set("arn", targetGroup.TargetGroupArn) d.Set("arn_suffix", TargetGroupSuffixFromARN(targetGroup.TargetGroupArn)) d.Set("ip_address_type", targetGroup.IpAddressType) d.Set("name", targetGroup.TargetGroupName) d.Set("name_prefix", create.NamePrefixFromName(aws.StringValue(targetGroup.TargetGroupName))) + targetType := aws.StringValue(targetGroup.TargetType) d.Set("target_type", targetType) if err := d.Set("health_check", flattenLbTargetGroupHealthCheck(targetGroup)); err != nil { @@ -616,8 +615,10 @@ func resourceTargetGroupRead(ctx context.Context, d *schema.ResourceData, meta i if _, ok := d.GetOk("port"); targetGroup.Port != nil || ok { d.Set("port", targetGroup.Port) } + var protocol string if _, ok := d.GetOk("protocol"); targetGroup.Protocol != nil || ok { - d.Set("protocol", targetGroup.Protocol) + protocol = aws.StringValue(targetGroup.Protocol) + d.Set("protocol", protocol) } if _, ok := d.GetOk("protocol_version"); targetGroup.ProtocolVersion != nil || ok { d.Set("protocol_version", targetGroup.ProtocolVersion) @@ -675,12 +676,7 @@ func resourceTargetGroupRead(ctx context.Context, d *schema.ResourceData, meta i } } - stickinessAttr, err := flattenTargetGroupStickiness(attributes) - if err != nil { - return sdkdiag.AppendErrorf(diags, "flattening stickiness: %s", err) - } - - if err := d.Set("stickiness", stickinessAttr); err != nil { + if err := d.Set("stickiness", []interface{}{flattenTargetGroupStickinessAttributes(attributes, protocol)}); err != nil { return sdkdiag.AppendErrorf(diags, "setting stickiness: %s", err) } @@ -1099,6 +1095,42 @@ func expandTargetGroupStickinessAttributes(tfMap map[string]interface{}, protoco return apiObjects } +func flattenTargetGroupStickinessAttributes(apiObjects []*elbv2.TargetGroupAttribute, protocol string) map[string]interface{} { + if len(apiObjects) == 0 { + return nil + } + + tfMap := map[string]interface{}{} + + var stickinessType string + for _, apiObject := range apiObjects { + switch k, v := aws.StringValue(apiObject.Key), apiObject.Value; k { + case targetGroupAttributeStickinessEnabled: + tfMap["enabled"] = flex.StringToBoolValue(v) + case targetGroupAttributeStickinessType: + stickinessType = aws.StringValue(v) + tfMap["type"] = stickinessType + } + } + + switch protocol { + case elbv2.ProtocolEnumHttp, elbv2.ProtocolEnumHttps: + for _, apiObject := range apiObjects { + k, v := aws.StringValue(apiObject.Key), apiObject.Value + switch { + case k == targetGroupAttributeStickinessLBCookieDurationSeconds && stickinessType == stickinessTypeLBCookie: + tfMap["cookie_duration"] = flex.StringToIntValue(v) + case k == targetGroupAttributeStickinessAppCookieCookieName && stickinessType == stickinessTypeAppCookie: + tfMap["cookie_name"] = aws.StringValue(v) + case k == targetGroupAttributeStickinessAppCookieDurationSeconds && stickinessType == stickinessTypeAppCookie: + tfMap["cookie_duration"] = flex.StringToIntValue(v) + } + } + } + + return tfMap +} + func 
flattenTargetHealthState(attributes []*elbv2.TargetGroupAttribute) ([]interface{}, error) { if len(attributes) == 0 { return []interface{}{}, nil @@ -1139,47 +1171,6 @@ func flattenTargetGroupFailover(attributes []*elbv2.TargetGroupAttribute) []inte return []interface{}{m} } -func flattenTargetGroupStickiness(attributes []*elbv2.TargetGroupAttribute) ([]interface{}, error) { - if len(attributes) == 0 { - return []interface{}{}, nil - } - - m := make(map[string]interface{}) - - for _, attr := range attributes { - switch aws.StringValue(attr.Key) { - case "stickiness.enabled": - enabled, err := strconv.ParseBool(aws.StringValue(attr.Value)) - if err != nil { - return nil, fmt.Errorf("converting stickiness.enabled to bool: %s", aws.StringValue(attr.Value)) - } - m["enabled"] = enabled - case "stickiness.type": - m["type"] = aws.StringValue(attr.Value) - case "stickiness.lb_cookie.duration_seconds": - if sType, ok := m["type"].(string); !ok || sType == "lb_cookie" { - duration, err := strconv.Atoi(aws.StringValue(attr.Value)) - if err != nil { - return nil, fmt.Errorf("converting stickiness.lb_cookie.duration_seconds to int: %s", aws.StringValue(attr.Value)) - } - m["cookie_duration"] = duration - } - case "stickiness.app_cookie.cookie_name": - m["cookie_name"] = aws.StringValue(attr.Value) - case "stickiness.app_cookie.duration_seconds": - if sType, ok := m["type"].(string); !ok || sType == "app_cookie" { - duration, err := strconv.Atoi(aws.StringValue(attr.Value)) - if err != nil { - return nil, fmt.Errorf("converting stickiness.app_cookie.duration_seconds to int: %s", aws.StringValue(attr.Value)) - } - m["cookie_duration"] = duration - } - } - } - - return []interface{}{m}, nil -} - func resourceTargetGroupCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, meta any) error { healthCheck := make(map[string]any) if healthChecks := diff.Get("health_check").([]interface{}); len(healthChecks) == 1 { diff --git a/internal/service/elbv2/target_group_data_source.go b/internal/service/elbv2/target_group_data_source.go index 1dd94c9293c..85b4dd1b326 100644 --- a/internal/service/elbv2/target_group_data_source.go +++ b/internal/service/elbv2/target_group_data_source.go @@ -232,24 +232,25 @@ func dataSourceTargetGroupRead(ctx context.Context, d *schema.ResourceData, meta return sdkdiag.AppendErrorf(diags, "setting health_check: %s", err) } + var protocol string if v, _ := d.Get("target_type").(string); v != elbv2.TargetTypeEnumLambda { - d.Set("vpc_id", targetGroup.VpcId) d.Set("port", targetGroup.Port) - d.Set("protocol", targetGroup.Protocol) + protocol = aws.StringValue(targetGroup.Protocol) + d.Set("protocol", protocol) + d.Set("vpc_id", targetGroup.VpcId) } - switch d.Get("protocol").(string) { + switch protocol { case elbv2.ProtocolEnumHttp, elbv2.ProtocolEnumHttps: d.Set("protocol_version", targetGroup.ProtocolVersion) } - attrResp, err := conn.DescribeTargetGroupAttributesWithContext(ctx, &elbv2.DescribeTargetGroupAttributesInput{ - TargetGroupArn: aws.String(d.Id()), - }) + attributes, err := findTargetGroupAttributesByARN(ctx, conn, d.Id()) + if err != nil { - return sdkdiag.AppendErrorf(diags, "retrieving Target Group Attributes: %s", err) + return sdkdiag.AppendErrorf(diags, "reading ELBv2 Target Group (%s) attributes: %s", d.Id(), err) } - for _, attr := range attrResp.Attributes { + for _, attr := range attributes { switch aws.StringValue(attr.Key) { case "deregistration_delay.connection_termination.enabled": enabled, err := strconv.ParseBool(aws.StringValue(attr.Value)) @@ 
-296,12 +297,7 @@ func dataSourceTargetGroupRead(ctx context.Context, d *schema.ResourceData, meta } } - stickinessAttr, err := flattenTargetGroupStickiness(attrResp.Attributes) - if err != nil { - return sdkdiag.AppendErrorf(diags, "flattening stickiness: %s", err) - } - - if err := d.Set("stickiness", stickinessAttr); err != nil { + if err := d.Set("stickiness", []interface{}{flattenTargetGroupStickinessAttributes(attributes, protocol)}); err != nil { return sdkdiag.AppendErrorf(diags, "setting stickiness: %s", err) } From 3d890581eeb43ef5a42ee97e1a8ff2a1fa6ec780 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 15 Dec 2023 10:57:48 -0500 Subject: [PATCH 091/123] Fixup 'TestAccELBV2TargetGroup_Stickiness_updateStickinessType'. --- internal/service/elbv2/target_group_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/internal/service/elbv2/target_group_test.go b/internal/service/elbv2/target_group_test.go index 5a5f81c053a..602b7188380 100644 --- a/internal/service/elbv2/target_group_test.go +++ b/internal/service/elbv2/target_group_test.go @@ -1545,6 +1545,7 @@ func TestAccELBV2TargetGroup_Stickiness_updateStickinessType(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "stickiness.#", "1"), resource.TestCheckResourceAttr(resourceName, "stickiness.0.enabled", "true"), resource.TestCheckResourceAttr(resourceName, "stickiness.0.type", "lb_cookie"), + resource.TestCheckResourceAttr(resourceName, "stickiness.0.cookie_name", ""), resource.TestCheckResourceAttr(resourceName, "stickiness.0.cookie_duration", "10000"), resource.TestCheckResourceAttr(resourceName, "health_check.#", "1"), resource.TestCheckResourceAttr(resourceName, "health_check.0.path", "/health2"), @@ -1596,7 +1597,7 @@ func TestAccELBV2TargetGroup_Stickiness_updateStickinessType(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "stickiness.#", "1"), resource.TestCheckResourceAttr(resourceName, "stickiness.0.enabled", "true"), resource.TestCheckResourceAttr(resourceName, "stickiness.0.type", "lb_cookie"), - resource.TestCheckResourceAttr(resourceName, "stickiness.0.cookie_name", "Cookie"), + resource.TestCheckResourceAttr(resourceName, "stickiness.0.cookie_name", ""), resource.TestCheckResourceAttr(resourceName, "stickiness.0.cookie_duration", "10000"), resource.TestCheckResourceAttr(resourceName, "health_check.#", "1"), resource.TestCheckResourceAttr(resourceName, "health_check.0.path", "/health2"), From 36f3c805c9297e1ce1cbf11aa974c02e5383f52e Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 15 Dec 2023 11:00:07 -0500 Subject: [PATCH 092/123] Tweak CHANGELOG entry. 
--- .changelog/31436.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.changelog/31436.txt b/.changelog/31436.txt index c36d19086b4..cd5e71e36c0 100644 --- a/.changelog/31436.txt +++ b/.changelog/31436.txt @@ -1,3 +1,3 @@ ```release-note:bug -resource/aws_lb_target_group: Persist `stickiness.app_cookie.cookie_name` across changes between app_cookie and lb_cookie ALB stickiness +resource/aws_lb_target_group: Fix diff on `stickiness.cookie_name` when `stickiness.type` is `lb_cookie` ``` \ No newline at end of file From cbd209e22b6051b9642b51ab96166336794cc58b Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 14 Dec 2023 20:26:58 -0500 Subject: [PATCH 093/123] autoflex: Add block key map --- internal/framework/flex/autoflex.go | 5 +++++ internal/framework/flex/autoflex_test.go | 23 +++++++++++++++++++++++ 2 files changed, 28 insertions(+) diff --git a/internal/framework/flex/autoflex.go b/internal/framework/flex/autoflex.go index 69cd93c72d5..fda37eb9b05 100644 --- a/internal/framework/flex/autoflex.go +++ b/internal/framework/flex/autoflex.go @@ -18,6 +18,7 @@ type ResourcePrefixCtxKey string const ( ResourcePrefix ResourcePrefixCtxKey = "RESOURCE_PREFIX" ResourcePrefixRecurse ResourcePrefixCtxKey = "RESOURCE_PREFIX_RECURSE" + BlockKeyMap = "TFBlockKeyMap" ) // Expand = TF --> AWS @@ -94,6 +95,10 @@ func autoFlexConvertStruct(ctx context.Context, from any, to any, flexer autoFle if fieldName == "Tags" { continue // Resource tags are handled separately. } + if fieldName == BlockKeyMap { + continue + } + toFieldVal := findFieldFuzzy(ctx, fieldName, valTo, valFrom) if !toFieldVal.IsValid() { continue // Corresponding field not found in to. diff --git a/internal/framework/flex/autoflex_test.go b/internal/framework/flex/autoflex_test.go index 0c86261d0d6..491874db027 100644 --- a/internal/framework/flex/autoflex_test.go +++ b/internal/framework/flex/autoflex_test.go @@ -297,3 +297,26 @@ type TestFlexTF18 struct { Field5 fwtypes.MapValueOf[types.String] `tfsdk:"field5"` Field6 fwtypes.MapValueOf[types.String] `tfsdk:"field6"` } + +type TestFlexBlockKeyMapTF01 struct { + BlockMap fwtypes.ListNestedObjectValueOf[TestFlexBlockKeyMapTF02] `tfsdk:"block_map"` +} + +type TestFlexBlockKeyMapTF02 struct { + TFBlockKeyMap types.String `tfsdk:"block_key_map"` + Attr1 types.String `tfsdk:"attr1"` + Attr2 types.String `tfsdk:"attr2"` +} + +type TestFlexBlockKeyMapAWS01 struct { + BlockMap map[string]TestFlexBlockKeyMapAWS02 +} + +type TestFlexBlockKeyMapAWS02 struct { + Attr1 string + Attr2 string +} + +type TestFlexBlockKeyMapAWS03 struct { + BlockMap map[string]*TestFlexBlockKeyMapAWS02 +} From 9dfa74c25eb60906709728dfa0d139b6fd3d289c Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 14 Dec 2023 20:27:51 -0500 Subject: [PATCH 094/123] autoflex/expand: Allow expanding block key maps --- internal/framework/flex/auto_expand.go | 92 ++++++++++++++++++++- internal/framework/flex/auto_expand_test.go | 90 ++++++++++++++++++++ 2 files changed, 181 insertions(+), 1 deletion(-) diff --git a/internal/framework/flex/auto_expand.go b/internal/framework/flex/auto_expand.go index 9709ff6bb9f..36b8d10fe1e 100644 --- a/internal/framework/flex/auto_expand.go +++ b/internal/framework/flex/auto_expand.go @@ -525,11 +525,27 @@ func (expander autoExpander) nestedObject(ctx context.Context, vFrom fwtypes.Nes return diags } + case reflect.Map: + switch tElem := tTo.Elem(); tElem.Kind() { + case reflect.Struct: + // + // types.List(OfObject) -> map[string]struct + // + 
diags.Append(expander.nestedKeyObjectToMap(ctx, vFrom, tTo, tElem, vTo)...) + return diags + case reflect.Ptr: + // + // types.List(OfObject) -> map[string]*struct + // + diags.Append(expander.nestedKeyObjectToMap(ctx, vFrom, tTo, tElem, vTo)...) + return diags + } + case reflect.Slice: switch tElem := tTo.Elem(); tElem.Kind() { case reflect.Struct: // - // types.List(OfObject) -> []struct. + // types.List(OfObject) -> []struct // diags.Append(expander.nestedObjectToSlice(ctx, vFrom, tTo, tElem, vTo)...) return diags @@ -614,6 +630,51 @@ func (expander autoExpander) nestedObjectToSlice(ctx context.Context, vFrom fwty return diags } +// nestedKeyObjectToMap copies a Plugin Framework NestedObjectValue to a compatible AWS API map[string]struct value. +func (expander autoExpander) nestedKeyObjectToMap(ctx context.Context, vFrom fwtypes.NestedObjectValue, tSlice, tElem reflect.Type, vTo reflect.Value) diag.Diagnostics { + var diags diag.Diagnostics + + // Get the nested Objects as a slice. + from, d := vFrom.ToObjectSlice(ctx) + diags.Append(d...) + if diags.HasError() { + return diags + } + + if tElem.Kind() == reflect.Ptr { + tElem = tElem.Elem() + } + + // Create a new target slice and expand each element. + f := reflect.ValueOf(from) + m := reflect.MakeMap(vTo.Type()) + for i := 0; i < f.Len(); i++ { + // Create a new target structure and walk its fields. + target := reflect.New(tElem) + diags.Append(autoFlexConvertStruct(ctx, f.Index(i).Interface(), target.Interface(), expander)...) + if diags.HasError() { + return diags + } + + key, d := blockKeyMap(ctx, f.Index(i).Interface()) + diags.Append(d...) + if diags.HasError() { + return diags + } + + // Set value (or pointer) in the target map. + if vTo.Type().Elem().Kind() == reflect.Struct { + m.SetMapIndex(key, target.Elem()) + } else { + m.SetMapIndex(key, target) + } + } + + vTo.Set(m) + + return diags +} + // objectMap copies a Plugin Framework ObjectMapValue value to a compatible AWS API value. func (expander autoExpander) objectMap(ctx context.Context, vFrom fwtypes.ObjectMapValue, vTo reflect.Value) diag.Diagnostics { var diags diag.Diagnostics @@ -684,3 +745,32 @@ func (expander autoExpander) mappedObjectToStruct(ctx context.Context, vFrom fwt return diags } + +// blockKeyMap takes a struct and extracts the value of the `key` +func blockKeyMap(ctx context.Context, from any) (reflect.Value, diag.Diagnostics) { + var diags diag.Diagnostics + + valFrom := reflect.ValueOf(from) + if kind := valFrom.Kind(); kind == reflect.Ptr { + valFrom = valFrom.Elem() + } + + for i, typFrom := 0, valFrom.Type(); i < typFrom.NumField(); i++ { + field := typFrom.Field(i) + if field.PkgPath != "" { + continue // Skip unexported fields. 
+ } + + // go from StringValue to string + if field.Name == BlockKeyMap { + if v, ok := valFrom.Field(i).Interface().(basetypes.StringValue); ok { + return reflect.ValueOf(v.ValueString()), diags + } + return valFrom.Field(i), diags + } + } + + diags.AddError("AutoFlEx", fmt.Sprintf("unable to find map block key (%s)", BlockKeyMap)) + + return reflect.Zero(reflect.TypeOf("")), diags +} diff --git a/internal/framework/flex/auto_expand_test.go b/internal/framework/flex/auto_expand_test.go index e5b8c39aa22..4781197f00f 100644 --- a/internal/framework/flex/auto_expand_test.go +++ b/internal/framework/flex/auto_expand_test.go @@ -575,6 +575,96 @@ func TestExpandGeneric(t *testing.T) { }, }, }, + { + TestName: "block key map", + Source: &TestFlexBlockKeyMapTF01{ + BlockMap: fwtypes.NewListNestedObjectValueOfValueSlice[TestFlexBlockKeyMapTF02](ctx, []TestFlexBlockKeyMapTF02{ + { + TFBlockKeyMap: types.StringValue("x"), + Attr1: types.StringValue("a"), + Attr2: types.StringValue("b"), + }, + { + TFBlockKeyMap: types.StringValue("y"), + Attr1: types.StringValue("c"), + Attr2: types.StringValue("d"), + }, + }), + }, + Target: &TestFlexBlockKeyMapAWS01{}, + WantTarget: &TestFlexBlockKeyMapAWS01{ + BlockMap: map[string]TestFlexBlockKeyMapAWS02{ + "x": { + Attr1: "a", + Attr2: "b", + }, + "y": { + Attr1: "c", + Attr2: "d", + }, + }, + }, + }, + { + TestName: "block key map ptr source", + Source: &TestFlexBlockKeyMapTF01{ + BlockMap: fwtypes.NewListNestedObjectValueOfSlice(ctx, []*TestFlexBlockKeyMapTF02{ + { + TFBlockKeyMap: types.StringValue("x"), + Attr1: types.StringValue("a"), + Attr2: types.StringValue("b"), + }, + { + TFBlockKeyMap: types.StringValue("y"), + Attr1: types.StringValue("c"), + Attr2: types.StringValue("d"), + }, + }), + }, + Target: &TestFlexBlockKeyMapAWS01{}, + WantTarget: &TestFlexBlockKeyMapAWS01{ + BlockMap: map[string]TestFlexBlockKeyMapAWS02{ + "x": { + Attr1: "a", + Attr2: "b", + }, + "y": { + Attr1: "c", + Attr2: "d", + }, + }, + }, + }, + { + TestName: "block key map ptr both", + Source: &TestFlexBlockKeyMapTF01{ + BlockMap: fwtypes.NewListNestedObjectValueOfSlice(ctx, []*TestFlexBlockKeyMapTF02{ + { + TFBlockKeyMap: types.StringValue("x"), + Attr1: types.StringValue("a"), + Attr2: types.StringValue("b"), + }, + { + TFBlockKeyMap: types.StringValue("y"), + Attr1: types.StringValue("c"), + Attr2: types.StringValue("d"), + }, + }), + }, + Target: &TestFlexBlockKeyMapAWS03{}, + WantTarget: &TestFlexBlockKeyMapAWS03{ + BlockMap: map[string]*TestFlexBlockKeyMapAWS02{ + "x": { + Attr1: "a", + Attr2: "b", + }, + "y": { + Attr1: "c", + Attr2: "d", + }, + }, + }, + }, { TestName: "complex nesting", Source: &TestFlexComplexNestTF01{ From 9182ec4523a5e88fb51f267e19da68ed4defc7e6 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 14 Dec 2023 20:28:09 -0500 Subject: [PATCH 095/123] autoflex/flatten: Allow flattening block key maps --- internal/framework/flex/auto_flatten.go | 129 +++++++++++++++++++ internal/framework/flex/auto_flatten_test.go | 90 +++++++++++++ 2 files changed, 219 insertions(+) diff --git a/internal/framework/flex/auto_flatten.go b/internal/framework/flex/auto_flatten.go index 36859d08985..898888243f4 100644 --- a/internal/framework/flex/auto_flatten.go +++ b/internal/framework/flex/auto_flatten.go @@ -508,6 +508,14 @@ func (flattener autoFlattener) map_(ctx context.Context, vFrom reflect.Value, tT switch tMapElem := vFrom.Type().Elem(); tMapElem.Kind() { case reflect.Struct: switch tTo := tTo.(type) { + case basetypes.ListTypable: + // + // map[string]struct -> 
fwtypes.ListNestedObjectOf[Object] + // + if tTo, ok := tTo.(fwtypes.NestedObjectType); ok { + diags.Append(flattener.structMapToObjectList(ctx, vFrom, tTo, vTo)...) + return diags + } case basetypes.MapTypable: // // map[string]struct -> fwtypes.ObjectMapOf[Object] @@ -565,8 +573,21 @@ func (flattener autoFlattener) map_(ctx context.Context, vFrom reflect.Value, tT diags.Append(flattener.structMapToObjectMap(ctx, vFrom, tTo, vTo)...) return diags } + + if tTo, ok := tTo.(fwtypes.NestedObjectType); ok { + diags.Append(flattener.structMapToObjectList(ctx, vFrom, tTo, vTo)...) + return diags + } case reflect.String: switch tTo := tTo.(type) { + case basetypes.ListTypable: + // + // map[string]struct -> fwtypes.ListNestedObjectOf[Object] + // + if tTo, ok := tTo.(fwtypes.NestedObjectType); ok { + diags.Append(flattener.structMapToObjectList(ctx, vFrom, tTo, vTo)...) + return diags + } case basetypes.MapTypable: // // map[string]*string -> types.Map(OfString). @@ -671,6 +692,76 @@ func (flattener autoFlattener) structMapToObjectMap(ctx context.Context, vFrom r return diags } +func (flattener autoFlattener) structMapToObjectList(ctx context.Context, vFrom reflect.Value, tTo fwtypes.NestedObjectType, vTo reflect.Value) diag.Diagnostics { + var diags diag.Diagnostics + + if vFrom.IsNil() { + val, d := tTo.NullValue(ctx) + diags.Append(d...) + if diags.HasError() { + return diags + } + + vTo.Set(reflect.ValueOf(val)) + return diags + } + + n := vFrom.Len() + to, d := tTo.NewObjectSlice(ctx, n, n) + diags.Append(d...) + if diags.HasError() { + return diags + } + + t := reflect.ValueOf(to) + + //tStruct := t.Type().Elem() + //if tStruct.Kind() == reflect.Ptr { + // tStruct = tStruct.Elem() + //} + + i := 0 + for _, key := range vFrom.MapKeys() { + //target := reflect.New(tStruct) + target, d := tTo.NewObjectPtr(ctx) + diags.Append(d...) + if diags.HasError() { + return diags + } + + fromInterface := vFrom.MapIndex(key).Interface() + if vFrom.MapIndex(key).Kind() == reflect.Ptr { + fromInterface = vFrom.MapIndex(key).Elem().Interface() + } + + diags.Append(autoFlexConvertStruct(ctx, fromInterface, target, flattener)...) + if diags.HasError() { + return diags + } + + d = blockKeyMapSet(ctx, target, key.String()) + diags.Append(d...) + + t.Index(i).Set(reflect.ValueOf(target)) + i++ + //if t.Type().Elem().Kind() == reflect.Struct { + // t.SetMapIndex(key, target.Elem()) + //} else { + // t.SetMapIndex(key, target) + //} + } + + val, d := tTo.ValueFromObjectSlice(ctx, to) + diags.Append(d...) + if diags.HasError() { + return diags + } + + vTo.Set(reflect.ValueOf(val)) + + return diags +} + // structToNestedObject copies an AWS API struct value to a compatible Plugin Framework NestedObjectValue value. 
func (flattener autoFlattener) structToNestedObject(ctx context.Context, vFrom reflect.Value, isNullFrom bool, tTo fwtypes.NestedObjectType, vTo reflect.Value) diag.Diagnostics { var diags diag.Diagnostics @@ -758,3 +849,41 @@ func (flattener autoFlattener) sliceOfStructNestedObject(ctx context.Context, vF vTo.Set(reflect.ValueOf(val)) return diags } + +// blockKeyMapSet takes a struct and assigns the value of the `key` +func blockKeyMapSet(ctx context.Context, to any, key string) diag.Diagnostics { + var diags diag.Diagnostics + + valTo := reflect.ValueOf(to) + if kind := valTo.Kind(); kind == reflect.Ptr { + valTo = valTo.Elem() + } + + if valTo.Kind() != reflect.Struct { + diags.AddError("AutoFlEx", fmt.Sprintf("wrong type (%T), expected struct", valTo)) + return diags + } + + for i, typTo := 0, valTo.Type(); i < typTo.NumField(); i++ { + field := typTo.Field(i) + if field.PkgPath != "" { + continue // Skip unexported fields. + } + + // go to StringValue to string + if field.Name != BlockKeyMap { + continue + } + + if _, ok := valTo.Field(i).Interface().(basetypes.StringValue); ok { + valTo.Field(i).Set(reflect.ValueOf(basetypes.NewStringValue(key))) + return diags + } + + return diags + } + + diags.AddError("AutoFlEx", fmt.Sprintf("unable to find map block key (%s)", BlockKeyMap)) + + return diags +} diff --git a/internal/framework/flex/auto_flatten_test.go b/internal/framework/flex/auto_flatten_test.go index a5a2b9f424b..6115d01af4e 100644 --- a/internal/framework/flex/auto_flatten_test.go +++ b/internal/framework/flex/auto_flatten_test.go @@ -747,6 +747,96 @@ func TestFlattenGeneric(t *testing.T) { }), }, }, + { + TestName: "block key map", + Source: &TestFlexBlockKeyMapAWS01{ + BlockMap: map[string]TestFlexBlockKeyMapAWS02{ + "x": { + Attr1: "a", + Attr2: "b", + }, + "y": { + Attr1: "c", + Attr2: "d", + }, + }, + }, + Target: &TestFlexBlockKeyMapTF01{}, + WantTarget: &TestFlexBlockKeyMapTF01{ + BlockMap: fwtypes.NewListNestedObjectValueOfValueSlice[TestFlexBlockKeyMapTF02](ctx, []TestFlexBlockKeyMapTF02{ + { + TFBlockKeyMap: types.StringValue("x"), + Attr1: types.StringValue("a"), + Attr2: types.StringValue("b"), + }, + { + TFBlockKeyMap: types.StringValue("y"), + Attr1: types.StringValue("c"), + Attr2: types.StringValue("d"), + }, + }), + }, + }, + { + TestName: "block key map ptr source", + Source: &TestFlexBlockKeyMapAWS03{ + BlockMap: map[string]*TestFlexBlockKeyMapAWS02{ + "x": { + Attr1: "a", + Attr2: "b", + }, + "y": { + Attr1: "c", + Attr2: "d", + }, + }, + }, + Target: &TestFlexBlockKeyMapTF01{}, + WantTarget: &TestFlexBlockKeyMapTF01{ + BlockMap: fwtypes.NewListNestedObjectValueOfValueSlice[TestFlexBlockKeyMapTF02](ctx, []TestFlexBlockKeyMapTF02{ + { + TFBlockKeyMap: types.StringValue("x"), + Attr1: types.StringValue("a"), + Attr2: types.StringValue("b"), + }, + { + TFBlockKeyMap: types.StringValue("y"), + Attr1: types.StringValue("c"), + Attr2: types.StringValue("d"), + }, + }), + }, + }, + { + TestName: "block key map ptr both", + Source: &TestFlexBlockKeyMapAWS03{ + BlockMap: map[string]*TestFlexBlockKeyMapAWS02{ + "x": { + Attr1: "a", + Attr2: "b", + }, + "y": { + Attr1: "c", + Attr2: "d", + }, + }, + }, + Target: &TestFlexBlockKeyMapTF01{}, + WantTarget: &TestFlexBlockKeyMapTF01{ + BlockMap: fwtypes.NewListNestedObjectValueOfSlice(ctx, []*TestFlexBlockKeyMapTF02{ + { + TFBlockKeyMap: types.StringValue("x"), + Attr1: types.StringValue("a"), + Attr2: types.StringValue("b"), + }, + { + TFBlockKeyMap: types.StringValue("y"), + Attr1: types.StringValue("c"), + Attr2: 
types.StringValue("d"), + }, + }), + }, + }, { TestName: "complex nesting", Source: &TestFlexComplexNestAWS01{ From 73e0f43d8759cc04b5da9e9d2c5a2b508b951c81 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 14 Dec 2023 20:32:40 -0500 Subject: [PATCH 096/123] autoflex/flatten: Fix comment --- internal/framework/flex/auto_flatten.go | 1 - 1 file changed, 1 deletion(-) diff --git a/internal/framework/flex/auto_flatten.go b/internal/framework/flex/auto_flatten.go index 898888243f4..0e5450718c5 100644 --- a/internal/framework/flex/auto_flatten.go +++ b/internal/framework/flex/auto_flatten.go @@ -870,7 +870,6 @@ func blockKeyMapSet(ctx context.Context, to any, key string) diag.Diagnostics { continue // Skip unexported fields. } - // go to StringValue to string if field.Name != BlockKeyMap { continue } From 6c80e0370084ab8d93aa9dcde62f77e9941e64ea Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 15 Dec 2023 11:13:32 -0500 Subject: [PATCH 097/123] r/aws_lb_target_group: Tidy up 'flattenTargetGroupHealthStateAttributes'. --- internal/service/elbv2/target_group.go | 88 ++++++++++++-------------- 1 file changed, 42 insertions(+), 46 deletions(-) diff --git a/internal/service/elbv2/target_group.go b/internal/service/elbv2/target_group.go index a0a8d0ce229..92e49af41e6 100644 --- a/internal/service/elbv2/target_group.go +++ b/internal/service/elbv2/target_group.go @@ -459,6 +459,10 @@ func resourceTargetGroupCreate(ctx context.Context, d *schema.ResourceData, meta attributes = append(attributes, expandTargetGroupStickinessAttributes(v.([]interface{})[0].(map[string]interface{}), protocol)...) } + if v, ok := d.GetOk("target_health_state"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + attributes = append(attributes, expandTargetGroupHealthStateAttributes(v.([]interface{})[0].(map[string]interface{}), protocol)...) 
+ } + if v, null, _ := nullable.Int(d.Get("deregistration_delay").(string)).Value(); !null { attributes = append(attributes, &elbv2.TargetGroupAttribute{ Key: aws.String("deregistration_delay.timeout_seconds"), @@ -525,22 +529,6 @@ func resourceTargetGroupCreate(ctx context.Context, d *schema.ResourceData, meta ) } } - - // Only supported for TCP & TLS protocols - if v, ok := d.Get("protocol").(string); ok { - if v == elbv2.ProtocolEnumTcp || v == elbv2.ProtocolEnumTls { - if v, ok := d.GetOk("target_health_state"); ok && len(v.([]interface{})) > 0 { - targetHealthStateBlock := v.([]interface{}) - targetHealthState := targetHealthStateBlock[0].(map[string]interface{}) - attributes = append(attributes, - &elbv2.TargetGroupAttribute{ - Key: aws.String("target_health_state.unhealthy.connection_termination.enabled"), - Value: aws.String(strconv.FormatBool(targetHealthState["enable_unhealthy_connection_termination"].(bool))), - }, - ) - } - } - } case elbv2.TargetTypeEnumLambda: if v, ok := d.GetOk("lambda_multi_value_headers_enabled"); ok { attributes = append(attributes, &elbv2.TargetGroupAttribute{ @@ -680,12 +668,8 @@ func resourceTargetGroupRead(ctx context.Context, d *schema.ResourceData, meta i return sdkdiag.AppendErrorf(diags, "setting stickiness: %s", err) } - targetHealthStateAttr, err := flattenTargetHealthState(attributes) - if err != nil { - return sdkdiag.AppendErrorf(diags, "flattening target health state: %s", err) - } - if err := d.Set("target_health_state", targetHealthStateAttr); err != nil { - return sdkdiag.AppendErrorf(diags, "setting target health state: %s", err) + if err := d.Set("target_health_state", []interface{}{flattenTargetGroupHealthStateAttributes(attributes, protocol)}); err != nil { + return sdkdiag.AppendErrorf(diags, "setting target_health_state: %s", err) } // Set target failover attributes for GWLB @@ -768,6 +752,12 @@ func resourceTargetGroupUpdate(ctx context.Context, d *schema.ResourceData, meta } } + if d.HasChange("target_health_state") { + if v, ok := d.GetOk("target_health_state"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + attributes = append(attributes, expandTargetGroupHealthStateAttributes(v.([]interface{})[0].(map[string]interface{}), protocol)...) 
+ } + } + if d.HasChange("deregistration_delay") { if v, null, _ := nullable.Int(d.Get("deregistration_delay").(string)).Value(); !null { attributes = append(attributes, &elbv2.TargetGroupAttribute{ @@ -819,18 +809,6 @@ func resourceTargetGroupUpdate(ctx context.Context, d *schema.ResourceData, meta }) } - if d.HasChange("target_health_state") { - targetHealthStateBlock := d.Get("target_health_state").([]interface{}) - if len(targetHealthStateBlock) == 1 { - targetHealthState := targetHealthStateBlock[0].(map[string]interface{}) - attributes = append(attributes, - &elbv2.TargetGroupAttribute{ - Key: aws.String("target_health_state.unhealthy.connection_termination.enabled"), - Value: aws.String(strconv.FormatBool(targetHealthState["enable_unhealthy_connection_termination"].(bool))), - }) - } - } - if d.HasChange("target_failover") { failoverBlock := d.Get("target_failover").([]interface{}) if len(failoverBlock) == 1 { @@ -1131,25 +1109,43 @@ func flattenTargetGroupStickinessAttributes(apiObjects []*elbv2.TargetGroupAttri return tfMap } -func flattenTargetHealthState(attributes []*elbv2.TargetGroupAttribute) ([]interface{}, error) { - if len(attributes) == 0 { - return []interface{}{}, nil +func expandTargetGroupHealthStateAttributes(tfMap map[string]interface{}, protocol string) []*elbv2.TargetGroupAttribute { + if tfMap == nil { + return nil } - m := make(map[string]interface{}) + var apiObjects []*elbv2.TargetGroupAttribute - for _, attr := range attributes { - switch aws.StringValue(attr.Key) { - case "target_health_state.unhealthy.connection_termination.enabled": - enabled, err := strconv.ParseBool(aws.StringValue(attr.Value)) - if err != nil { - return nil, fmt.Errorf("converting target_health_state.unhealthy.connection_termination to bool: %s", aws.StringValue(attr.Value)) + switch protocol { + case elbv2.ProtocolEnumTcp, elbv2.ProtocolEnumTls: + apiObjects = append(apiObjects, + &elbv2.TargetGroupAttribute{ + Key: aws.String(targetGroupAttributeTargetHealthStateUnhealthyConnectionTerminationEnabled), + Value: flex.BoolValueToString(tfMap["enable_unhealthy_connection_termination"].(bool)), + }) + } + + return apiObjects +} + +func flattenTargetGroupHealthStateAttributes(apiObjects []*elbv2.TargetGroupAttribute, protocol string) map[string]interface{} { + if len(apiObjects) == 0 { + return nil + } + + tfMap := map[string]interface{}{} + + switch protocol { + case elbv2.ProtocolEnumTcp, elbv2.ProtocolEnumTls: + for _, apiObject := range apiObjects { + switch k, v := aws.StringValue(apiObject.Key), apiObject.Value; k { + case targetGroupAttributeTargetHealthStateUnhealthyConnectionTerminationEnabled: + tfMap["enable_unhealthy_connection_termination"] = flex.StringToBoolValue(v) } - m["enable_unhealthy_connection_termination"] = enabled } } - return []interface{}{m}, nil + return tfMap } func flattenTargetGroupFailover(attributes []*elbv2.TargetGroupAttribute) []interface{} { From 5c710890bd3360741257ecfaf66c726d02d6606f Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 15 Dec 2023 11:24:02 -0500 Subject: [PATCH 098/123] r/aws_lb_target_group: Tidy up 'flattenTargetGroupTargetFailoverAttributes'. 
--- internal/service/elbv2/target_group.go | 144 ++++++++++++------------- 1 file changed, 69 insertions(+), 75 deletions(-) diff --git a/internal/service/elbv2/target_group.go b/internal/service/elbv2/target_group.go index 92e49af41e6..6418067aba7 100644 --- a/internal/service/elbv2/target_group.go +++ b/internal/service/elbv2/target_group.go @@ -459,8 +459,12 @@ func resourceTargetGroupCreate(ctx context.Context, d *schema.ResourceData, meta attributes = append(attributes, expandTargetGroupStickinessAttributes(v.([]interface{})[0].(map[string]interface{}), protocol)...) } + if v, ok := d.GetOk("target_failover"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + attributes = append(attributes, expandTargetGroupTargetFailoverAttributes(v.([]interface{})[0].(map[string]interface{}), protocol)...) + } + if v, ok := d.GetOk("target_health_state"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { - attributes = append(attributes, expandTargetGroupHealthStateAttributes(v.([]interface{})[0].(map[string]interface{}), protocol)...) + attributes = append(attributes, expandTargetGroupTargetHealthStateAttributes(v.([]interface{})[0].(map[string]interface{}), protocol)...) } if v, null, _ := nullable.Int(d.Get("deregistration_delay").(string)).Value(); !null { @@ -511,24 +515,6 @@ func resourceTargetGroupCreate(ctx context.Context, d *schema.ResourceData, meta Value: aws.String(fmt.Sprintf("%d", v.(int))), }) } - - // Only supported for GWLB - if protocol == elbv2.ProtocolEnumGeneve { - if v, ok := d.GetOk("target_failover"); ok { - failoverBlock := v.([]interface{}) - failover := failoverBlock[0].(map[string]interface{}) - attributes = append(attributes, - &elbv2.TargetGroupAttribute{ - Key: aws.String("target_failover.on_deregistration"), - Value: aws.String(failover["on_deregistration"].(string)), - }, - &elbv2.TargetGroupAttribute{ - Key: aws.String("target_failover.on_unhealthy"), - Value: aws.String(failover["on_unhealthy"].(string)), - }, - ) - } - } case elbv2.TargetTypeEnumLambda: if v, ok := d.GetOk("lambda_multi_value_headers_enabled"); ok { attributes = append(attributes, &elbv2.TargetGroupAttribute{ @@ -621,6 +607,18 @@ func resourceTargetGroupRead(ctx context.Context, d *schema.ResourceData, meta i return sdkdiag.AppendErrorf(diags, "reading ELBv2 Target Group (%s) attributes: %s", d.Id(), err) } + if err := d.Set("stickiness", []interface{}{flattenTargetGroupStickinessAttributes(attributes, protocol)}); err != nil { + return sdkdiag.AppendErrorf(diags, "setting stickiness: %s", err) + } + + if err := d.Set("target_failover", []interface{}{flattenTargetGroupTargetFailoverAttributes(attributes, protocol)}); err != nil { + return sdkdiag.AppendErrorf(diags, "setting target_failover: %s", err) + } + + if err := d.Set("target_health_state", []interface{}{flattenTargetGroupTargetHealthStateAttributes(attributes, protocol)}); err != nil { + return sdkdiag.AppendErrorf(diags, "setting target_health_state: %s", err) + } + for _, attr := range attributes { switch aws.StringValue(attr.Key) { case "deregistration_delay.timeout_seconds": @@ -664,24 +662,6 @@ func resourceTargetGroupRead(ctx context.Context, d *schema.ResourceData, meta i } } - if err := d.Set("stickiness", []interface{}{flattenTargetGroupStickinessAttributes(attributes, protocol)}); err != nil { - return sdkdiag.AppendErrorf(diags, "setting stickiness: %s", err) - } - - if err := d.Set("target_health_state", []interface{}{flattenTargetGroupHealthStateAttributes(attributes, protocol)}); err != nil 
{ - return sdkdiag.AppendErrorf(diags, "setting target_health_state: %s", err) - } - - // Set target failover attributes for GWLB - targetFailoverAttr := flattenTargetGroupFailover(attributes) - if err != nil { - return sdkdiag.AppendErrorf(diags, "flattening target failover: %s", err) - } - - if err := d.Set("target_failover", targetFailoverAttr); err != nil { - return sdkdiag.AppendErrorf(diags, "setting target failover: %s", err) - } - return diags } @@ -752,9 +732,15 @@ func resourceTargetGroupUpdate(ctx context.Context, d *schema.ResourceData, meta } } + if d.HasChange("target_failover") { + if v, ok := d.GetOk("target_failover"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + attributes = append(attributes, expandTargetGroupTargetFailoverAttributes(v.([]interface{})[0].(map[string]interface{}), protocol)...) + } + } + if d.HasChange("target_health_state") { if v, ok := d.GetOk("target_health_state"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { - attributes = append(attributes, expandTargetGroupHealthStateAttributes(v.([]interface{})[0].(map[string]interface{}), protocol)...) + attributes = append(attributes, expandTargetGroupTargetHealthStateAttributes(v.([]interface{})[0].(map[string]interface{}), protocol)...) } } @@ -808,24 +794,6 @@ func resourceTargetGroupUpdate(ctx context.Context, d *schema.ResourceData, meta Value: aws.String(d.Get("load_balancing_cross_zone_enabled").(string)), }) } - - if d.HasChange("target_failover") { - failoverBlock := d.Get("target_failover").([]interface{}) - if len(failoverBlock) == 1 { - failover := failoverBlock[0].(map[string]interface{}) - attributes = append(attributes, - &elbv2.TargetGroupAttribute{ - Key: aws.String("target_failover.on_deregistration"), - Value: aws.String(failover["on_deregistration"].(string)), - }, - &elbv2.TargetGroupAttribute{ - Key: aws.String("target_failover.on_unhealthy"), - Value: aws.String(failover["on_unhealthy"].(string)), - }, - ) - } - } - case elbv2.TargetTypeEnumLambda: if d.HasChange("lambda_multi_value_headers_enabled") { attributes = append(attributes, &elbv2.TargetGroupAttribute{ @@ -1109,7 +1077,7 @@ func flattenTargetGroupStickinessAttributes(apiObjects []*elbv2.TargetGroupAttri return tfMap } -func expandTargetGroupHealthStateAttributes(tfMap map[string]interface{}, protocol string) []*elbv2.TargetGroupAttribute { +func expandTargetGroupTargetFailoverAttributes(tfMap map[string]interface{}, protocol string) []*elbv2.TargetGroupAttribute { if tfMap == nil { return nil } @@ -1117,18 +1085,22 @@ func expandTargetGroupHealthStateAttributes(tfMap map[string]interface{}, protoc var apiObjects []*elbv2.TargetGroupAttribute switch protocol { - case elbv2.ProtocolEnumTcp, elbv2.ProtocolEnumTls: + case elbv2.ProtocolEnumGeneve: apiObjects = append(apiObjects, &elbv2.TargetGroupAttribute{ - Key: aws.String(targetGroupAttributeTargetHealthStateUnhealthyConnectionTerminationEnabled), - Value: flex.BoolValueToString(tfMap["enable_unhealthy_connection_termination"].(bool)), + Key: aws.String(targetGroupAttributeTargetFailoverOnDeregistration), + Value: aws.String(tfMap["on_deregistration"].(string)), + }, + &elbv2.TargetGroupAttribute{ + Key: aws.String(targetGroupAttributeTargetFailoverOnUnhealthy), + Value: aws.String(tfMap["on_unhealthy"].(string)), }) } return apiObjects } -func flattenTargetGroupHealthStateAttributes(apiObjects []*elbv2.TargetGroupAttribute, protocol string) map[string]interface{} { +func flattenTargetGroupTargetFailoverAttributes(apiObjects 
[]*elbv2.TargetGroupAttribute, protocol string) map[string]interface{} { if len(apiObjects) == 0 { return nil } @@ -1136,11 +1108,13 @@ func flattenTargetGroupHealthStateAttributes(apiObjects []*elbv2.TargetGroupAttr tfMap := map[string]interface{}{} switch protocol { - case elbv2.ProtocolEnumTcp, elbv2.ProtocolEnumTls: + case elbv2.ProtocolEnumGeneve: for _, apiObject := range apiObjects { switch k, v := aws.StringValue(apiObject.Key), apiObject.Value; k { - case targetGroupAttributeTargetHealthStateUnhealthyConnectionTerminationEnabled: - tfMap["enable_unhealthy_connection_termination"] = flex.StringToBoolValue(v) + case targetGroupAttributeTargetFailoverOnDeregistration: + tfMap["on_deregistration"] = aws.StringValue(v) + case targetGroupAttributeTargetFailoverOnUnhealthy: + tfMap["on_unhealthy"] = aws.StringValue(v) } } } @@ -1148,23 +1122,43 @@ func flattenTargetGroupHealthStateAttributes(apiObjects []*elbv2.TargetGroupAttr return tfMap } -func flattenTargetGroupFailover(attributes []*elbv2.TargetGroupAttribute) []interface{} { - if len(attributes) == 0 { - return []interface{}{} +func expandTargetGroupTargetHealthStateAttributes(tfMap map[string]interface{}, protocol string) []*elbv2.TargetGroupAttribute { + if tfMap == nil { + return nil } - m := make(map[string]interface{}) + var apiObjects []*elbv2.TargetGroupAttribute - for _, attr := range attributes { - switch aws.StringValue(attr.Key) { - case "target_failover.on_deregistration": - m["on_deregistration"] = aws.StringValue(attr.Value) - case "target_failover.on_unhealthy": - m["on_unhealthy"] = aws.StringValue(attr.Value) + switch protocol { + case elbv2.ProtocolEnumTcp, elbv2.ProtocolEnumTls: + apiObjects = append(apiObjects, + &elbv2.TargetGroupAttribute{ + Key: aws.String(targetGroupAttributeTargetHealthStateUnhealthyConnectionTerminationEnabled), + Value: flex.BoolValueToString(tfMap["enable_unhealthy_connection_termination"].(bool)), + }) + } + + return apiObjects +} + +func flattenTargetGroupTargetHealthStateAttributes(apiObjects []*elbv2.TargetGroupAttribute, protocol string) map[string]interface{} { + if len(apiObjects) == 0 { + return nil + } + + tfMap := map[string]interface{}{} + + switch protocol { + case elbv2.ProtocolEnumTcp, elbv2.ProtocolEnumTls: + for _, apiObject := range apiObjects { + switch k, v := aws.StringValue(apiObject.Key), apiObject.Value; k { + case targetGroupAttributeTargetHealthStateUnhealthyConnectionTerminationEnabled: + tfMap["enable_unhealthy_connection_termination"] = flex.StringToBoolValue(v) + } } } - return []interface{}{m} + return tfMap } func resourceTargetGroupCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, meta any) error { From d04f929845e551669413f1a9aed54bea0822a354 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 15 Dec 2023 11:56:36 -0500 Subject: [PATCH 099/123] Add 'flex.Int64ValueToString'. --- internal/flex/flex.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/internal/flex/flex.go b/internal/flex/flex.go index 34c417583b1..171c2e120bc 100644 --- a/internal/flex/flex.go +++ b/internal/flex/flex.go @@ -305,6 +305,11 @@ func IntValueToString(v int) *string { return aws.String(strconv.Itoa(v)) } +// Int64ValueToString converts a Go int64 value to a string pointer. +func Int64ValueToString(v int64) *string { + return aws.String(strconv.FormatInt(v, 10)) +} + // StringToIntValue converts a string pointer to a Go int value. // Invalid integer strings are converted to 0. 
func StringToIntValue(v *string) int { From 2ab3f1371ef67b8649e4ee5702c279a2d6cf450c Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 15 Dec 2023 12:04:20 -0500 Subject: [PATCH 100/123] r/aws_lb_target_group: Add and use 'targetGroupAttributeMap'. --- internal/service/elbv2/target_group.go | 321 +++++++++++++------------ 1 file changed, 165 insertions(+), 156 deletions(-) diff --git a/internal/service/elbv2/target_group.go b/internal/service/elbv2/target_group.go index 6418067aba7..d523f6edfd9 100644 --- a/internal/service/elbv2/target_group.go +++ b/internal/service/elbv2/target_group.go @@ -31,6 +31,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/types/nullable" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" + "golang.org/x/exp/slices" ) // @SDKResource("aws_alb_target_group", name="Target Group") @@ -466,64 +467,10 @@ func resourceTargetGroupCreate(ctx context.Context, d *schema.ResourceData, meta if v, ok := d.GetOk("target_health_state"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { attributes = append(attributes, expandTargetGroupTargetHealthStateAttributes(v.([]interface{})[0].(map[string]interface{}), protocol)...) } - - if v, null, _ := nullable.Int(d.Get("deregistration_delay").(string)).Value(); !null { - attributes = append(attributes, &elbv2.TargetGroupAttribute{ - Key: aws.String("deregistration_delay.timeout_seconds"), - Value: aws.String(fmt.Sprintf("%d", v)), - }) - } - - if v, ok := d.GetOk("load_balancing_algorithm_type"); ok { - attributes = append(attributes, &elbv2.TargetGroupAttribute{ - Key: aws.String("load_balancing.algorithm.type"), - Value: aws.String(v.(string)), - }) - } - - if v, ok := d.GetOk("load_balancing_cross_zone_enabled"); ok { - attributes = append(attributes, &elbv2.TargetGroupAttribute{ - Key: aws.String("load_balancing.cross_zone.enabled"), - Value: aws.String(v.(string)), - }) - } - - if v, ok := d.GetOk("preserve_client_ip"); ok { - attributes = append(attributes, &elbv2.TargetGroupAttribute{ - Key: aws.String("preserve_client_ip.enabled"), - Value: aws.String(v.(string)), - }) - } - - if v, ok := d.GetOk("proxy_protocol_v2"); ok { - attributes = append(attributes, &elbv2.TargetGroupAttribute{ - Key: aws.String("proxy_protocol_v2.enabled"), - Value: aws.String(strconv.FormatBool(v.(bool))), - }) - } - - if v, ok := d.GetOk("connection_termination"); ok { - attributes = append(attributes, &elbv2.TargetGroupAttribute{ - Key: aws.String("deregistration_delay.connection_termination.enabled"), - Value: aws.String(strconv.FormatBool(v.(bool))), - }) - } - - if v, ok := d.GetOk("slow_start"); ok { - attributes = append(attributes, &elbv2.TargetGroupAttribute{ - Key: aws.String("slow_start.duration_seconds"), - Value: aws.String(fmt.Sprintf("%d", v.(int))), - }) - } - case elbv2.TargetTypeEnumLambda: - if v, ok := d.GetOk("lambda_multi_value_headers_enabled"); ok { - attributes = append(attributes, &elbv2.TargetGroupAttribute{ - Key: aws.String("lambda.multi_value_headers.enabled"), - Value: aws.String(strconv.FormatBool(v.(bool))), - }) - } } + attributes = append(attributes, targetGroupAttributes.expand(d, targetType, false)...) 
+ if len(attributes) > 0 { input := &elbv2.ModifyTargetGroupAttributesInput{ Attributes: attributes, @@ -619,48 +566,7 @@ func resourceTargetGroupRead(ctx context.Context, d *schema.ResourceData, meta i return sdkdiag.AppendErrorf(diags, "setting target_health_state: %s", err) } - for _, attr := range attributes { - switch aws.StringValue(attr.Key) { - case "deregistration_delay.timeout_seconds": - d.Set("deregistration_delay", attr.Value) - case "lambda.multi_value_headers.enabled": - enabled, err := strconv.ParseBool(aws.StringValue(attr.Value)) - if err != nil { - return sdkdiag.AppendErrorf(diags, "converting lambda.multi_value_headers.enabled to bool: %s", aws.StringValue(attr.Value)) - } - d.Set("lambda_multi_value_headers_enabled", enabled) - case "proxy_protocol_v2.enabled": - enabled, err := strconv.ParseBool(aws.StringValue(attr.Value)) - if err != nil { - return sdkdiag.AppendErrorf(diags, "converting proxy_protocol_v2.enabled to bool: %s", aws.StringValue(attr.Value)) - } - d.Set("proxy_protocol_v2", enabled) - case "deregistration_delay.connection_termination.enabled": - enabled, err := strconv.ParseBool(aws.StringValue(attr.Value)) - if err != nil { - return sdkdiag.AppendErrorf(diags, "converting deregistration_delay.connection_termination.enabled to bool: %s", aws.StringValue(attr.Value)) - } - d.Set("connection_termination", enabled) - case "slow_start.duration_seconds": - slowStart, err := strconv.Atoi(aws.StringValue(attr.Value)) - if err != nil { - return sdkdiag.AppendErrorf(diags, "converting slow_start.duration_seconds to int: %s", aws.StringValue(attr.Value)) - } - d.Set("slow_start", slowStart) - case "load_balancing.algorithm.type": - loadBalancingAlgorithm := aws.StringValue(attr.Value) - d.Set("load_balancing_algorithm_type", loadBalancingAlgorithm) - case "load_balancing.cross_zone.enabled": - loadBalancingCrossZoneEnabled := aws.StringValue(attr.Value) - d.Set("load_balancing_cross_zone_enabled", loadBalancingCrossZoneEnabled) - case "preserve_client_ip.enabled": - _, err := strconv.ParseBool(aws.StringValue(attr.Value)) - if err != nil { - return sdkdiag.AppendErrorf(diags, "converting preserve_client_ip.enabled to bool: %s", aws.StringValue(attr.Value)) - } - d.Set("preserve_client_ip", attr.Value) - } - } + targetGroupAttributes.flatten(d, targetType, attributes) return diags } @@ -743,66 +649,10 @@ func resourceTargetGroupUpdate(ctx context.Context, d *schema.ResourceData, meta attributes = append(attributes, expandTargetGroupTargetHealthStateAttributes(v.([]interface{})[0].(map[string]interface{}), protocol)...) 
} } - - if d.HasChange("deregistration_delay") { - if v, null, _ := nullable.Int(d.Get("deregistration_delay").(string)).Value(); !null { - attributes = append(attributes, &elbv2.TargetGroupAttribute{ - Key: aws.String("deregistration_delay.timeout_seconds"), - Value: aws.String(fmt.Sprintf("%d", v)), - }) - } - } - - if d.HasChange("slow_start") { - attributes = append(attributes, &elbv2.TargetGroupAttribute{ - Key: aws.String("slow_start.duration_seconds"), - Value: aws.String(fmt.Sprintf("%d", d.Get("slow_start").(int))), - }) - } - - if d.HasChange("proxy_protocol_v2") { - attributes = append(attributes, &elbv2.TargetGroupAttribute{ - Key: aws.String("proxy_protocol_v2.enabled"), - Value: aws.String(strconv.FormatBool(d.Get("proxy_protocol_v2").(bool))), - }) - } - - if d.HasChange("connection_termination") { - attributes = append(attributes, &elbv2.TargetGroupAttribute{ - Key: aws.String("deregistration_delay.connection_termination.enabled"), - Value: aws.String(strconv.FormatBool(d.Get("connection_termination").(bool))), - }) - } - - if d.HasChange("preserve_client_ip") { - attributes = append(attributes, &elbv2.TargetGroupAttribute{ - Key: aws.String("preserve_client_ip.enabled"), - Value: aws.String(d.Get("preserve_client_ip").(string)), - }) - } - - if d.HasChange("load_balancing_algorithm_type") { - attributes = append(attributes, &elbv2.TargetGroupAttribute{ - Key: aws.String("load_balancing.algorithm.type"), - Value: aws.String(d.Get("load_balancing_algorithm_type").(string)), - }) - } - - if d.HasChange("load_balancing_cross_zone_enabled") { - attributes = append(attributes, &elbv2.TargetGroupAttribute{ - Key: aws.String("load_balancing.cross_zone.enabled"), - Value: aws.String(d.Get("load_balancing_cross_zone_enabled").(string)), - }) - } - case elbv2.TargetTypeEnumLambda: - if d.HasChange("lambda_multi_value_headers_enabled") { - attributes = append(attributes, &elbv2.TargetGroupAttribute{ - Key: aws.String("lambda.multi_value_headers.enabled"), - Value: aws.String(strconv.FormatBool(d.Get("lambda_multi_value_headers_enabled").(bool))), - }) - } } + attributes = append(attributes, targetGroupAttributes.expand(d, targetType, true)...) 
+ if len(attributes) > 0 { input := &elbv2.ModifyTargetGroupAttributesInput{ Attributes: attributes, @@ -837,6 +687,165 @@ func resourceTargetGroupDelete(ctx context.Context, d *schema.ResourceData, meta return diags } +type targetGroupAttributeInfo struct { + apiAttributeKey string + tfType schema.ValueType + tfNullableType schema.ValueType + targetTypesSupported []string +} + +type targetGroupAttributeMap map[string]targetGroupAttributeInfo + +var targetGroupAttributes = targetGroupAttributeMap(map[string]targetGroupAttributeInfo{ + "connection_termination": { + apiAttributeKey: targetGroupAttributeDeregistrationDelayConnectionTerminationEnabled, + tfType: schema.TypeBool, + targetTypesSupported: []string{elbv2.TargetTypeEnumInstance, elbv2.TargetTypeEnumIp}, + }, + "deregistration_delay": { + apiAttributeKey: targetGroupAttributeDeregistrationDelayTimeoutSeconds, + tfType: schema.TypeString, + tfNullableType: schema.TypeInt, + targetTypesSupported: []string{elbv2.TargetTypeEnumInstance, elbv2.TargetTypeEnumIp}, + }, + "lambda_multi_value_headers_enabled": { + apiAttributeKey: targetGroupAttributeLambdaMultiValueHeadersEnabled, + tfType: schema.TypeBool, + targetTypesSupported: []string{elbv2.TargetTypeEnumLambda}, + }, + "load_balancing_algorithm_type": { + apiAttributeKey: targetGroupAttributeLoadBalancingAlgorithmType, + tfType: schema.TypeString, + targetTypesSupported: []string{elbv2.TargetTypeEnumInstance, elbv2.TargetTypeEnumIp}, + }, + "load_balancing_cross_zone_enabled": { + apiAttributeKey: targetGroupAttributeLoadBalancingCrossZoneEnabled, + tfType: schema.TypeString, + targetTypesSupported: []string{elbv2.TargetTypeEnumInstance, elbv2.TargetTypeEnumIp}, + }, + "preserve_client_ip": { + apiAttributeKey: targetGroupAttributePreserveClientIPEnabled, + tfType: schema.TypeString, + tfNullableType: schema.TypeBool, + targetTypesSupported: []string{elbv2.TargetTypeEnumInstance, elbv2.TargetTypeEnumIp}, + }, + "proxy_protocol_v2": { + apiAttributeKey: targetGroupAttributeProxyProtocolV2Enabled, + tfType: schema.TypeBool, + targetTypesSupported: []string{elbv2.TargetTypeEnumInstance, elbv2.TargetTypeEnumIp}, + }, + "slow_start": { + apiAttributeKey: targetGroupAttributeSlowStartDurationSeconds, + tfType: schema.TypeInt, + targetTypesSupported: []string{elbv2.TargetTypeEnumInstance, elbv2.TargetTypeEnumIp}, + }, +}) + +func (m targetGroupAttributeMap) expand(d *schema.ResourceData, targetType string, update bool) []*elbv2.TargetGroupAttribute { + var apiObjects []*elbv2.TargetGroupAttribute + + for tfAttributeName, attributeInfo := range m { + if update && !d.HasChange(tfAttributeName) { + continue + } + + if !slices.Contains(attributeInfo.targetTypesSupported, targetType) { + continue + } + + switch v, nt, k := d.Get(tfAttributeName), attributeInfo.tfNullableType, aws.String(attributeInfo.apiAttributeKey); nt { + case schema.TypeBool: + v := v.(string) + if v, null, _ := nullable.Bool(v).Value(); !null { + apiObjects = append(apiObjects, &elbv2.TargetGroupAttribute{ + Key: k, + Value: flex.BoolValueToString(v), + }) + } + case schema.TypeInt: + v := v.(string) + if v, null, _ := nullable.Int(v).Value(); !null { + apiObjects = append(apiObjects, &elbv2.TargetGroupAttribute{ + Key: k, + Value: flex.Int64ValueToString(v), + }) + } + default: + switch attributeInfo.tfType { + case schema.TypeBool: + v := v.(bool) + apiObjects = append(apiObjects, &elbv2.TargetGroupAttribute{ + Key: k, + Value: flex.BoolValueToString(v), + }) + case schema.TypeInt: + v := v.(int) + apiObjects = 
append(apiObjects, &elbv2.TargetGroupAttribute{ + Key: k, + Value: flex.IntValueToString(v), + }) + case schema.TypeString: + if v := v.(string); v != "" { + apiObjects = append(apiObjects, &elbv2.TargetGroupAttribute{ + Key: k, + Value: aws.String(v), + }) + } + } + } + + switch v, t, k := d.Get(tfAttributeName), attributeInfo.tfType, aws.String(attributeInfo.apiAttributeKey); t { + case schema.TypeBool: + v := v.(bool) + apiObjects = append(apiObjects, &elbv2.TargetGroupAttribute{ + Key: k, + Value: flex.BoolValueToString(v), + }) + case schema.TypeInt: + v := v.(int) + apiObjects = append(apiObjects, &elbv2.TargetGroupAttribute{ + Key: k, + Value: flex.IntValueToString(v), + }) + case schema.TypeString: + if v := v.(string); v != "" { + apiObjects = append(apiObjects, &elbv2.TargetGroupAttribute{ + Key: k, + Value: aws.String(v), + }) + } + } + } + + return apiObjects +} + +func (m targetGroupAttributeMap) flatten(d *schema.ResourceData, targetType string, apiObjects []*elbv2.TargetGroupAttribute) { + for tfAttributeName, attributeInfo := range m { + if !slices.Contains(attributeInfo.targetTypesSupported, targetType) { + continue + } + + k := attributeInfo.apiAttributeKey + i := slices.IndexFunc(apiObjects, func(v *elbv2.TargetGroupAttribute) bool { + return aws.StringValue(v.Key) == k + }) + + if i == -1 { + continue + } + + switch v, t := apiObjects[i].Value, attributeInfo.tfType; t { + case schema.TypeBool: + d.Set(tfAttributeName, flex.StringToBoolValue(v)) + case schema.TypeInt: + d.Set(tfAttributeName, flex.StringToIntValue(v)) + case schema.TypeString: + d.Set(tfAttributeName, v) + } + } +} + func FindTargetGroupByARN(ctx context.Context, conn *elbv2.ELBV2, arn string) (*elbv2.TargetGroup, error) { input := &elbv2.DescribeTargetGroupsInput{ TargetGroupArns: aws.StringSlice([]string{arn}), From 9512e32c73477c5945e72dc7ff097e8c4aa87e3b Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Fri, 15 Dec 2023 12:07:46 -0500 Subject: [PATCH 101/123] autoflex: Lint --- internal/framework/flex/auto_expand.go | 10 +++++----- internal/framework/flex/auto_flatten.go | 4 ++-- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/internal/framework/flex/auto_expand.go b/internal/framework/flex/auto_expand.go index 36b8d10fe1e..cfb464dacaf 100644 --- a/internal/framework/flex/auto_expand.go +++ b/internal/framework/flex/auto_expand.go @@ -531,13 +531,13 @@ func (expander autoExpander) nestedObject(ctx context.Context, vFrom fwtypes.Nes // // types.List(OfObject) -> map[string]struct // - diags.Append(expander.nestedKeyObjectToMap(ctx, vFrom, tTo, tElem, vTo)...) + diags.Append(expander.nestedKeyObjectToMap(ctx, vFrom, tElem, vTo)...) return diags case reflect.Ptr: // // types.List(OfObject) -> map[string]*struct // - diags.Append(expander.nestedKeyObjectToMap(ctx, vFrom, tTo, tElem, vTo)...) + diags.Append(expander.nestedKeyObjectToMap(ctx, vFrom, tElem, vTo)...) return diags } @@ -631,7 +631,7 @@ func (expander autoExpander) nestedObjectToSlice(ctx context.Context, vFrom fwty } // nestedKeyObjectToMap copies a Plugin Framework NestedObjectValue to a compatible AWS API map[string]struct value. -func (expander autoExpander) nestedKeyObjectToMap(ctx context.Context, vFrom fwtypes.NestedObjectValue, tSlice, tElem reflect.Type, vTo reflect.Value) diag.Diagnostics { +func (expander autoExpander) nestedKeyObjectToMap(ctx context.Context, vFrom fwtypes.NestedObjectValue, tElem reflect.Type, vTo reflect.Value) diag.Diagnostics { var diags diag.Diagnostics // Get the nested Objects as a slice. 
@@ -656,7 +656,7 @@ func (expander autoExpander) nestedKeyObjectToMap(ctx context.Context, vFrom fwt return diags } - key, d := blockKeyMap(ctx, f.Index(i).Interface()) + key, d := blockKeyMap(f.Index(i).Interface()) diags.Append(d...) if diags.HasError() { return diags @@ -747,7 +747,7 @@ func (expander autoExpander) mappedObjectToStruct(ctx context.Context, vFrom fwt } // blockKeyMap takes a struct and extracts the value of the `key` -func blockKeyMap(ctx context.Context, from any) (reflect.Value, diag.Diagnostics) { +func blockKeyMap(from any) (reflect.Value, diag.Diagnostics) { var diags diag.Diagnostics valFrom := reflect.ValueOf(from) diff --git a/internal/framework/flex/auto_flatten.go b/internal/framework/flex/auto_flatten.go index 0e5450718c5..e2fe57c538c 100644 --- a/internal/framework/flex/auto_flatten.go +++ b/internal/framework/flex/auto_flatten.go @@ -739,7 +739,7 @@ func (flattener autoFlattener) structMapToObjectList(ctx context.Context, vFrom return diags } - d = blockKeyMapSet(ctx, target, key.String()) + d = blockKeyMapSet(target, key.String()) diags.Append(d...) t.Index(i).Set(reflect.ValueOf(target)) @@ -851,7 +851,7 @@ func (flattener autoFlattener) sliceOfStructNestedObject(ctx context.Context, vF } // blockKeyMapSet takes a struct and assigns the value of the `key` -func blockKeyMapSet(ctx context.Context, to any, key string) diag.Diagnostics { +func blockKeyMapSet(to any, key string) diag.Diagnostics { var diags diag.Diagnostics valTo := reflect.ValueOf(to) From 6e1f3f4821cb034c86c76d2c8274671005785574 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Fri, 15 Dec 2023 12:16:14 -0500 Subject: [PATCH 102/123] autoflex: Sort slices for testing with cmp --- internal/framework/flex/auto_flatten_test.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/internal/framework/flex/auto_flatten_test.go b/internal/framework/flex/auto_flatten_test.go index 6115d01af4e..f7854197ef1 100644 --- a/internal/framework/flex/auto_flatten_test.go +++ b/internal/framework/flex/auto_flatten_test.go @@ -5,11 +5,13 @@ package flex import ( "context" + "fmt" "testing" "time" "github.com/aws/aws-sdk-go-v2/aws" "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-framework/types/basetypes" @@ -902,11 +904,13 @@ func TestFlattenGeneric(t *testing.T) { t.Errorf("gotErr = %v, wantErr = %v", gotErr, testCase.WantErr) } + less := func(a, b any) bool { return fmt.Sprint(a) < fmt.Sprint(b) } + if gotErr { if !testCase.WantErr { t.Errorf("err = %q", err) } - } else if diff := cmp.Diff(testCase.Target, testCase.WantTarget); diff != "" { + } else if diff := cmp.Diff(testCase.Target, testCase.WantTarget, cmpopts.SortSlices(less)); diff != "" { t.Errorf("unexpected diff (+wanted, -got): %s", diff) } }) From 13fc8771a2fdf8e213d2a31bcd679a70742a6079 Mon Sep 17 00:00:00 2001 From: Adrian Johnson Date: Fri, 15 Dec 2023 11:29:36 -0600 Subject: [PATCH 103/123] add CHANGELOG entry --- .changelog/34848.txt | 3 +++ internal/service/dynamodb/status.go | 4 ++++ internal/service/dynamodb/table.go | 4 ++-- 3 files changed, 9 insertions(+), 2 deletions(-) create mode 100644 .changelog/34848.txt diff --git a/.changelog/34848.txt b/.changelog/34848.txt new file mode 100644 index 00000000000..14b3201a669 --- /dev/null +++ b/.changelog/34848.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_dynamodb_table: Fix error 
when waiting for snapshot to be created +``` \ No newline at end of file diff --git a/internal/service/dynamodb/status.go b/internal/service/dynamodb/status.go index b6c5d92c88d..ed06fafaf84 100644 --- a/internal/service/dynamodb/status.go +++ b/internal/service/dynamodb/status.go @@ -56,6 +56,10 @@ func statusImport(ctx context.Context, conn *dynamodb.DynamoDB, importArn string } output, err := conn.DescribeImportWithContext(ctx, describeImportInput) + if tfawserr.ErrCodeEquals(err, dynamodb.ErrCodeResourceNotFoundException) { + return nil, "", nil + } + if err != nil { return nil, "", err } diff --git a/internal/service/dynamodb/table.go b/internal/service/dynamodb/table.go index bdbe09fbffe..3c3746fc296 100644 --- a/internal/service/dynamodb/table.go +++ b/internal/service/dynamodb/table.go @@ -623,9 +623,9 @@ func resourceTableCreate(ctx context.Context, d *schema.ResourceData, meta inter } importArn := importTableOutput.(*dynamodb.ImportTableOutput).ImportTableDescription.ImportArn - if _, err = waitImportComplete(ctx, conn, *importArn, d.Timeout(schema.TimeoutCreate)); err != nil { + if _, err = waitImportComplete(ctx, conn, aws.StringValue(importArn), d.Timeout(schema.TimeoutCreate)); err != nil { d.SetId(tableName) - return create.AppendDiagError(diags, names.DynamoDB, create.ErrActionCreating, ResNameTable, d.Id(), err) + return create.AppendDiagError(diags, names.DynamoDB, create.ErrActionCreating, ResNameTable, tableName, err) } } else { input := &dynamodb.CreateTableInput{ From 03a55b2bc80e5eeb77347f7a7a5738f16e9fbfc3 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 15 Dec 2023 12:30:35 -0500 Subject: [PATCH 104/123] r/aws_lb_target_group: Corrections. --- internal/service/elbv2/target_group.go | 46 ++++++--------------- internal/service/elbv2/target_group_test.go | 4 -- 2 files changed, 13 insertions(+), 37 deletions(-) diff --git a/internal/service/elbv2/target_group.go b/internal/service/elbv2/target_group.go index d523f6edfd9..dcfdaed20a9 100644 --- a/internal/service/elbv2/target_group.go +++ b/internal/service/elbv2/target_group.go @@ -773,19 +773,21 @@ func (m targetGroupAttributeMap) expand(d *schema.ResourceData, targetType strin default: switch attributeInfo.tfType { case schema.TypeBool: - v := v.(bool) - apiObjects = append(apiObjects, &elbv2.TargetGroupAttribute{ - Key: k, - Value: flex.BoolValueToString(v), - }) + if v := v.(bool); v || update { + apiObjects = append(apiObjects, &elbv2.TargetGroupAttribute{ + Key: k, + Value: flex.BoolValueToString(v), + }) + } case schema.TypeInt: - v := v.(int) - apiObjects = append(apiObjects, &elbv2.TargetGroupAttribute{ - Key: k, - Value: flex.IntValueToString(v), - }) + if v := v.(int); v > 0 || update { + apiObjects = append(apiObjects, &elbv2.TargetGroupAttribute{ + Key: k, + Value: flex.IntValueToString(v), + }) + } case schema.TypeString: - if v := v.(string); v != "" { + if v := v.(string); v != "" || update { apiObjects = append(apiObjects, &elbv2.TargetGroupAttribute{ Key: k, Value: aws.String(v), @@ -793,28 +795,6 @@ func (m targetGroupAttributeMap) expand(d *schema.ResourceData, targetType strin } } } - - switch v, t, k := d.Get(tfAttributeName), attributeInfo.tfType, aws.String(attributeInfo.apiAttributeKey); t { - case schema.TypeBool: - v := v.(bool) - apiObjects = append(apiObjects, &elbv2.TargetGroupAttribute{ - Key: k, - Value: flex.BoolValueToString(v), - }) - case schema.TypeInt: - v := v.(int) - apiObjects = append(apiObjects, &elbv2.TargetGroupAttribute{ - Key: k, - Value: flex.IntValueToString(v), - 
}) - case schema.TypeString: - if v := v.(string); v != "" { - apiObjects = append(apiObjects, &elbv2.TargetGroupAttribute{ - Key: k, - Value: aws.String(v), - }) - } - } } return apiObjects diff --git a/internal/service/elbv2/target_group_test.go b/internal/service/elbv2/target_group_test.go index 602b7188380..14ae03e63d0 100644 --- a/internal/service/elbv2/target_group_test.go +++ b/internal/service/elbv2/target_group_test.go @@ -4083,10 +4083,6 @@ func testAccCheckTargetGroupExists(ctx context.Context, n string, v *elbv2.Targe return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return errors.New("No ELBv2 Target Group ID is set") - } - conn := acctest.Provider.Meta().(*conns.AWSClient).ELBV2Conn(ctx) output, err := tfelbv2.FindTargetGroupByARN(ctx, conn, rs.Primary.ID) From b537cde7c6c386d7dbdd81cf5f7fdc66d7b301e9 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Fri, 15 Dec 2023 12:35:47 -0500 Subject: [PATCH 105/123] autoflex: Testing ordering --- internal/framework/flex/auto_flatten.go | 11 ----------- internal/framework/flex/auto_flatten_test.go | 20 +------------------- 2 files changed, 1 insertion(+), 30 deletions(-) diff --git a/internal/framework/flex/auto_flatten.go b/internal/framework/flex/auto_flatten.go index e2fe57c538c..fd118d06cb2 100644 --- a/internal/framework/flex/auto_flatten.go +++ b/internal/framework/flex/auto_flatten.go @@ -715,14 +715,8 @@ func (flattener autoFlattener) structMapToObjectList(ctx context.Context, vFrom t := reflect.ValueOf(to) - //tStruct := t.Type().Elem() - //if tStruct.Kind() == reflect.Ptr { - // tStruct = tStruct.Elem() - //} - i := 0 for _, key := range vFrom.MapKeys() { - //target := reflect.New(tStruct) target, d := tTo.NewObjectPtr(ctx) diags.Append(d...) if diags.HasError() { @@ -744,11 +738,6 @@ func (flattener autoFlattener) structMapToObjectList(ctx context.Context, vFrom t.Index(i).Set(reflect.ValueOf(target)) i++ - //if t.Type().Elem().Kind() == reflect.Struct { - // t.SetMapIndex(key, target.Elem()) - //} else { - // t.SetMapIndex(key, target) - //} } val, d := tTo.ValueFromObjectSlice(ctx, to) diff --git a/internal/framework/flex/auto_flatten_test.go b/internal/framework/flex/auto_flatten_test.go index f7854197ef1..41702f250e6 100644 --- a/internal/framework/flex/auto_flatten_test.go +++ b/internal/framework/flex/auto_flatten_test.go @@ -757,10 +757,6 @@ func TestFlattenGeneric(t *testing.T) { Attr1: "a", Attr2: "b", }, - "y": { - Attr1: "c", - Attr2: "d", - }, }, }, Target: &TestFlexBlockKeyMapTF01{}, @@ -771,11 +767,6 @@ func TestFlattenGeneric(t *testing.T) { Attr1: types.StringValue("a"), Attr2: types.StringValue("b"), }, - { - TFBlockKeyMap: types.StringValue("y"), - Attr1: types.StringValue("c"), - Attr2: types.StringValue("d"), - }, }), }, }, @@ -787,10 +778,6 @@ func TestFlattenGeneric(t *testing.T) { Attr1: "a", Attr2: "b", }, - "y": { - Attr1: "c", - Attr2: "d", - }, }, }, Target: &TestFlexBlockKeyMapTF01{}, @@ -801,11 +788,6 @@ func TestFlattenGeneric(t *testing.T) { Attr1: types.StringValue("a"), Attr2: types.StringValue("b"), }, - { - TFBlockKeyMap: types.StringValue("y"), - Attr1: types.StringValue("c"), - Attr2: types.StringValue("d"), - }, }), }, }, @@ -904,7 +886,7 @@ func TestFlattenGeneric(t *testing.T) { t.Errorf("gotErr = %v, wantErr = %v", gotErr, testCase.WantErr) } - less := func(a, b any) bool { return fmt.Sprint(a) < fmt.Sprint(b) } + less := func(a, b any) bool { return fmt.Sprintf("%+v", a) < fmt.Sprintf("%+v", b) } if gotErr { if !testCase.WantErr { From 
d4d4cd5ede4b6e81d3f15a5b7b04f73014a2a91c Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Fri, 15 Dec 2023 12:37:32 -0500 Subject: [PATCH 106/123] autoflex: Testing ordering --- internal/framework/flex/auto_flatten_test.go | 9 --------- 1 file changed, 9 deletions(-) diff --git a/internal/framework/flex/auto_flatten_test.go b/internal/framework/flex/auto_flatten_test.go index 41702f250e6..ed98e88250e 100644 --- a/internal/framework/flex/auto_flatten_test.go +++ b/internal/framework/flex/auto_flatten_test.go @@ -799,10 +799,6 @@ func TestFlattenGeneric(t *testing.T) { Attr1: "a", Attr2: "b", }, - "y": { - Attr1: "c", - Attr2: "d", - }, }, }, Target: &TestFlexBlockKeyMapTF01{}, @@ -813,11 +809,6 @@ func TestFlattenGeneric(t *testing.T) { Attr1: types.StringValue("a"), Attr2: types.StringValue("b"), }, - { - TFBlockKeyMap: types.StringValue("y"), - Attr1: types.StringValue("c"), - Attr2: types.StringValue("d"), - }, }), }, }, From 8638cb636d9f9c9c8f87be4cb1c56798e6ff1490 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 15 Dec 2023 12:45:05 -0500 Subject: [PATCH 107/123] Add 'verify.StringHasPrefix'. --- internal/verify/validate.go | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/internal/verify/validate.go b/internal/verify/validate.go index 57a0d9752e2..cf2390c83cd 100644 --- a/internal/verify/validate.go +++ b/internal/verify/validate.go @@ -460,6 +460,23 @@ func FloatGreaterThan(threshold float64) schema.SchemaValidateFunc { } } +func StringHasPrefix(prefix string) schema.SchemaValidateFunc { + return func(v interface{}, k string) (warnings []string, errors []error) { + s, ok := v.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected type of %s to be string", k)) + return + } + + if !strings.HasPrefix(s, prefix) { + errors = append(errors, fmt.Errorf("expected %s to have prefix %s, got %s", k, prefix, s)) + return + } + + return warnings, errors + } +} + func ValidServicePrincipal(v interface{}, k string) (ws []string, errors []error) { value := v.(string) From 75a91831b713db097bb0ed61a823beb5f03f479f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 15 Dec 2023 13:25:14 -0500 Subject: [PATCH 108/123] build(deps): bump the terraform-devex group with 2 updates (#34941) Bumps the terraform-devex group with 2 updates: [github.com/hashicorp/terraform-plugin-mux](https://github.com/hashicorp/terraform-plugin-mux) and [github.com/hashicorp/terraform-plugin-sdk/v2](https://github.com/hashicorp/terraform-plugin-sdk). 
Updates `github.com/hashicorp/terraform-plugin-mux` from 0.12.0 to 0.13.0 - [Release notes](https://github.com/hashicorp/terraform-plugin-mux/releases) - [Changelog](https://github.com/hashicorp/terraform-plugin-mux/blob/main/CHANGELOG.md) - [Commits](https://github.com/hashicorp/terraform-plugin-mux/compare/v0.12.0...v0.13.0) Updates `github.com/hashicorp/terraform-plugin-sdk/v2` from 2.30.0 to 2.31.0 - [Release notes](https://github.com/hashicorp/terraform-plugin-sdk/releases) - [Changelog](https://github.com/hashicorp/terraform-plugin-sdk/blob/main/CHANGELOG.md) - [Commits](https://github.com/hashicorp/terraform-plugin-sdk/compare/v2.30.0...v2.31.0) --- updated-dependencies: - dependency-name: github.com/hashicorp/terraform-plugin-mux dependency-type: direct:production update-type: version-update:semver-minor dependency-group: terraform-devex - dependency-name: github.com/hashicorp/terraform-plugin-sdk/v2 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: terraform-devex ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 16 ++++++++-------- go.sum | 37 ++++++++++++++++++------------------- 2 files changed, 26 insertions(+), 27 deletions(-) diff --git a/go.mod b/go.mod index 553689a5b38..78a783a30a1 100644 --- a/go.mod +++ b/go.mod @@ -117,10 +117,10 @@ require ( github.com/hashicorp/terraform-plugin-framework v1.4.2 github.com/hashicorp/terraform-plugin-framework-timeouts v0.4.1 github.com/hashicorp/terraform-plugin-framework-validators v0.12.0 - github.com/hashicorp/terraform-plugin-go v0.19.1 + github.com/hashicorp/terraform-plugin-go v0.20.0 github.com/hashicorp/terraform-plugin-log v0.9.0 - github.com/hashicorp/terraform-plugin-mux v0.12.0 - github.com/hashicorp/terraform-plugin-sdk/v2 v2.30.0 + github.com/hashicorp/terraform-plugin-mux v0.13.0 + github.com/hashicorp/terraform-plugin-sdk/v2 v2.31.0 github.com/hashicorp/terraform-plugin-testing v1.6.0 github.com/jmespath/go-jmespath v0.4.0 github.com/mattbaird/jsonpatch v0.0.0-20230413205102-771768614e91 @@ -176,8 +176,8 @@ require ( github.com/google/uuid v1.3.1 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-checkpoint v0.5.0 // indirect - github.com/hashicorp/go-plugin v1.5.2 // indirect - github.com/hashicorp/hc-install v0.6.1 // indirect + github.com/hashicorp/go-plugin v1.6.0 // indirect + github.com/hashicorp/hc-install v0.6.2 // indirect github.com/hashicorp/logutils v1.0.0 // indirect github.com/hashicorp/terraform-exec v0.19.0 // indirect github.com/hashicorp/terraform-json v0.18.0 // indirect @@ -206,13 +206,13 @@ require ( go.opentelemetry.io/otel v1.21.0 // indirect go.opentelemetry.io/otel/metric v1.21.0 // indirect go.opentelemetry.io/otel/trace v1.21.0 // indirect - golang.org/x/mod v0.13.0 // indirect + golang.org/x/mod v0.14.0 // indirect golang.org/x/net v0.19.0 // indirect golang.org/x/sys v0.15.0 // indirect golang.org/x/text v0.14.0 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 // indirect - google.golang.org/grpc v1.59.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 // indirect + google.golang.org/grpc v1.60.0 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // 
indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index 1e073c94d70..dd14e373ab9 100644 --- a/go.sum +++ b/go.sum @@ -13,7 +13,6 @@ github.com/ProtonMail/go-crypto v0.0.0-20230923063757-afb1ddc0824c h1:kMFnB0vCcX github.com/ProtonMail/go-crypto v0.0.0-20230923063757-afb1ddc0824c/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= github.com/YakDriver/regexache v0.23.0 h1:kv3j4XKhbx/vqUilSBgizXDUXHvvH1KdYekdmGwz4C4= github.com/YakDriver/regexache v0.23.0/go.mod h1:K4BZ3MYKAqSFbYWqmbsG+OzYUDyJjnMEr27DJEsVG3U= -github.com/acomagu/bufpipe v1.0.4 h1:e3H4WUzM3npvo5uv95QuJM3cQspFNtFBzvJ2oNjKIDQ= github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= @@ -274,7 +273,7 @@ github.com/gertd/go-pluralize v0.2.1 h1:M3uASbVjMnTsPb0PNqg+E/24Vwigyo/tvyMTtAlL github.com/gertd/go-pluralize v0.2.1/go.mod h1:rbYaKDbsXxmRfr8uygAEKhOWsjyrrqrkHVpZvoOp8zk= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= -github.com/go-git/go-git/v5 v5.9.0 h1:cD9SFA7sHVRdJ7AYck1ZaAa/yeuBvGPxwXDL8cxrObY= +github.com/go-git/go-git/v5 v5.10.1 h1:tu8/D8i+TWxgKpzQ3Vc43e+kkhXqtsZCKI/egajKnxk= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -320,15 +319,15 @@ github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVH github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-plugin v1.5.2 h1:aWv8eimFqWlsEiMrYZdPYl+FdHaBJSN4AWwGWfT1G2Y= -github.com/hashicorp/go-plugin v1.5.2/go.mod h1:w1sAEES3g3PuV/RzUrgow20W2uErMly84hhD3um1WL4= +github.com/hashicorp/go-plugin v1.6.0 h1:wgd4KxHJTVGGqWBq4QPB1i5BZNEx9BR8+OFmHDmTk8A= +github.com/hashicorp/go-plugin v1.6.0/go.mod h1:lBS5MtSSBZk0SHc66KACcjjlU6WzEVP/8pwz68aMkCI= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/hc-install v0.6.1 h1:IGxShH7AVhPaSuSJpKtVi/EFORNjO+OYVJJrAtGG2mY= -github.com/hashicorp/hc-install v0.6.1/go.mod h1:0fW3jpg+wraYSnFDJ6Rlie3RvLf1bIqVIkzoon4KoVE= +github.com/hashicorp/hc-install v0.6.2 h1:V1k+Vraqz4olgZ9UzKiAcbman9i9scg9GgSt/U3mw/M= +github.com/hashicorp/hc-install v0.6.2/go.mod h1:2JBpd+NCFKiHiu/yYCGaPyPHhZLxXTpz8oreHa/a3Ps= github.com/hashicorp/hcl/v2 v2.19.1 h1://i05Jqznmb2EXqa39Nsvyan2o5XyMowW5fnCKW5RPI= github.com/hashicorp/hcl/v2 v2.19.1/go.mod h1:ThLC89FV4p9MPW804KVbe/cEXoQ8NZEh+JtMeeGErHE= 
github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= @@ -343,12 +342,12 @@ github.com/hashicorp/terraform-plugin-framework-timeouts v0.4.1 h1:gm5b1kHgFFhaK github.com/hashicorp/terraform-plugin-framework-timeouts v0.4.1/go.mod h1:MsjL1sQ9L7wGwzJ5RjcI6FzEMdyoBnw+XK8ZnOvQOLY= github.com/hashicorp/terraform-plugin-framework-validators v0.12.0 h1:HOjBuMbOEzl7snOdOoUfE2Jgeto6JOjLVQ39Ls2nksc= github.com/hashicorp/terraform-plugin-framework-validators v0.12.0/go.mod h1:jfHGE/gzjxYz6XoUwi/aYiiKrJDeutQNUtGQXkaHklg= -github.com/hashicorp/terraform-plugin-go v0.19.1 h1:lf/jTGTeELcz5IIbn/94mJdmnTjRYm6S6ct/JqCSr50= -github.com/hashicorp/terraform-plugin-go v0.19.1/go.mod h1:5NMIS+DXkfacX6o5HCpswda5yjkSYfKzn1Nfl9l+qRs= -github.com/hashicorp/terraform-plugin-mux v0.12.0 h1:TJlmeslQ11WlQtIFAfth0vXx+gSNgvMEng2Rn9z3WZY= -github.com/hashicorp/terraform-plugin-mux v0.12.0/go.mod h1:8MR0AgmV+Q03DIjyrAKxXyYlq2EUnYBQP8gxAAA0zeM= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.30.0 h1:X7vB6vn5tON2b49ILa4W7mFAsndeqJ7bZFOGbVO+0Cc= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.30.0/go.mod h1:ydFcxbdj6klCqYEPkPvdvFKiNGKZLUs+896ODUXCyao= +github.com/hashicorp/terraform-plugin-go v0.20.0 h1:oqvoUlL+2EUbKNsJbIt3zqqZ7wi6lzn4ufkn/UA51xQ= +github.com/hashicorp/terraform-plugin-go v0.20.0/go.mod h1:Rr8LBdMlY53a3Z/HpP+ZU3/xCDqtKNCkeI9qOyT10QE= +github.com/hashicorp/terraform-plugin-mux v0.13.0 h1:79U401/3nd8CWwDGtTHc8F3miSCAS9XGtVarxSTDgwA= +github.com/hashicorp/terraform-plugin-mux v0.13.0/go.mod h1:Ndv0FtwDG2ogzH59y64f2NYimFJ6I0smRgFUKfm6dyQ= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.31.0 h1:Bl3e2ei2j/Z3Hc2HIS15Gal2KMKyLAZ2om1HCEvK6es= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.31.0/go.mod h1:i2C41tszDjiWfziPQDL5R/f3Zp0gahXe5No/MIO9rCE= github.com/hashicorp/terraform-plugin-testing v1.6.0 h1:Wsnfh+7XSVRfwcr2jZYHsnLOnZl7UeaOBvsx6dl/608= github.com/hashicorp/terraform-plugin-testing v1.6.0/go.mod h1:cJGG0/8j9XhHaJZRC+0sXFI4uzqQZ9Az4vh6C4GJpFE= github.com/hashicorp/terraform-registry-address v0.2.3 h1:2TAiKJ1A3MAkZlH1YI/aTVcLZRu7JseiXNRHbOAyoTI= @@ -432,7 +431,7 @@ github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NF github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/skeema/knownhosts v1.2.0 h1:h9r9cf0+u7wSE+M183ZtMGgOJKiL96brpaz5ekfJCpM= +github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= @@ -483,8 +482,8 @@ golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY= -golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.14.0 
h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= +golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= @@ -544,10 +543,10 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 h1:N3bU/SQDCDyD6R528GJ/PwW9KjYcJA3dgyH+MovAkIM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:KSqppvjFjtoCI+KGd4PELB0qLNxdJHRGqRI09mB6pQA= -google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= -google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 h1:6GQBEOdGkX6MMTLT9V+TjtIRZCw9VPD5Z+yHY9wMgS0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97/go.mod h1:v7nGkzlmW8P3n/bKmWBn2WpBjpOEx8Q6gMueudAmKfY= +google.golang.org/grpc v1.60.0 h1:6FQAR0kM31P6MRdeluor2w2gPaS4SVNrD/DNTxrQ15k= +google.golang.org/grpc v1.60.0/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= From ddfc15e04f86581580957640d8042d96b28aeea6 Mon Sep 17 00:00:00 2001 From: changelogbot Date: Fri, 15 Dec 2023 18:27:32 +0000 Subject: [PATCH 109/123] Update CHANGELOG.md for #34941 --- CHANGELOG.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8a04d9dd4c9..4c9d48d4ff7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,9 @@ ## 5.32.0 (Unreleased) + +ENHANCEMENTS: + +* data-source/aws_ecr_image: Add `image_uri` attribute ([#24526](https://github.com/hashicorp/terraform-provider-aws/issues/24526)) + ## 5.31.0 (December 15, 2023) FEATURES: From fc5f6a0d81aa8c50e980395e84605787be740d34 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 15 Dec 2023 13:41:36 -0500 Subject: [PATCH 110/123] r/aws_lb_target_group: Tidy up 'flattenTargetGroupHealthCheck'. 
--- internal/service/elbv2/load_balancer.go | 12 +- internal/service/elbv2/target_group.go | 309 +++++++++--------- .../service/elbv2/target_group_data_source.go | 62 +--- 3 files changed, 159 insertions(+), 224 deletions(-) diff --git a/internal/service/elbv2/load_balancer.go b/internal/service/elbv2/load_balancer.go index 849b31031a7..45d7f76ba43 100644 --- a/internal/service/elbv2/load_balancer.go +++ b/internal/service/elbv2/load_balancer.go @@ -51,9 +51,9 @@ func ResourceLoadBalancer() *schema.Resource { }, CustomizeDiff: customdiff.Sequence( - customizeDiffALB, - customizeDiffNLB, - customizeDiffGWLB, + customizeDiffLoadBalancerALB, + customizeDiffLoadBalancerNLB, + customizeDiffLoadBalancerGWLB, verify.SetTagsDiff, ), @@ -1062,7 +1062,7 @@ func SuffixFromARN(arn *string) string { // cannot have security groups added if none are present, and cannot have // all security groups removed. If the type is 'network' and any of these // conditions are met, mark the diff as a ForceNew operation. -func customizeDiffNLB(_ context.Context, diff *schema.ResourceDiff, v interface{}) error { +func customizeDiffLoadBalancerNLB(_ context.Context, diff *schema.ResourceDiff, v interface{}) error { // The current criteria for determining if the operation should be ForceNew: // - lb of type "network" // - existing resource (id is not "") @@ -1152,7 +1152,7 @@ func customizeDiffNLB(_ context.Context, diff *schema.ResourceDiff, v interface{ return nil } -func customizeDiffALB(_ context.Context, diff *schema.ResourceDiff, v interface{}) error { +func customizeDiffLoadBalancerALB(_ context.Context, diff *schema.ResourceDiff, v interface{}) error { if lbType := diff.Get("load_balancer_type").(string); lbType != elbv2.LoadBalancerTypeEnumApplication { return nil } @@ -1208,7 +1208,7 @@ func customizeDiffALB(_ context.Context, diff *schema.ResourceDiff, v interface{ return nil } -func customizeDiffGWLB(_ context.Context, diff *schema.ResourceDiff, v interface{}) error { +func customizeDiffLoadBalancerGWLB(_ context.Context, diff *schema.ResourceDiff, v interface{}) error { if lbType := diff.Get("load_balancer_type").(string); lbType != elbv2.LoadBalancerTypeEnumGateway { return nil } diff --git a/internal/service/elbv2/target_group.go b/internal/service/elbv2/target_group.go index dcfdaed20a9..0a5007286e8 100644 --- a/internal/service/elbv2/target_group.go +++ b/internal/service/elbv2/target_group.go @@ -50,8 +50,8 @@ func ResourceTargetGroup() *schema.Resource { CustomizeDiff: customdiff.Sequence( resourceTargetGroupCustomizeDiff, - lambdaTargetHealthCheckProtocolCustomizeDiff, - nonLambdaValidationCustomizeDiff, + customizeDiffTargetGroupTargetTypeLambda, + customizeDiffTargetGroupTargetTypeNotLambda, verify.SetTagsDiff, ), @@ -110,7 +110,7 @@ func ResourceTargetGroup() *schema.Resource { Computed: true, ValidateFunc: validation.All( validation.StringLenBetween(1, 1024), - validTargetGroupHealthCheckPath, + verify.StringHasPrefix("/"), ), }, "port": { @@ -236,10 +236,13 @@ func ResourceTargetGroup() *schema.Resource { Default: false, }, "slow_start": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - ValidateFunc: validateSlowStart, + Type: schema.TypeInt, + Optional: true, + Default: 0, + ValidateFunc: validation.Any( + validation.IntBetween(0, 0), + validation.IntBetween(30, 900), + ), }, "stickiness": { Type: schema.TypeList, @@ -523,16 +526,15 @@ func resourceTargetGroupRead(ctx context.Context, d *schema.ResourceData, meta i d.Set("arn", targetGroup.TargetGroupArn) d.Set("arn_suffix", 
TargetGroupSuffixFromARN(targetGroup.TargetGroupArn)) + if err := d.Set("health_check", flattenTargetGroupHealthCheck(targetGroup)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting health_check: %s", err) + } d.Set("ip_address_type", targetGroup.IpAddressType) d.Set("name", targetGroup.TargetGroupName) d.Set("name_prefix", create.NamePrefixFromName(aws.StringValue(targetGroup.TargetGroupName))) targetType := aws.StringValue(targetGroup.TargetType) d.Set("target_type", targetType) - if err := d.Set("health_check", flattenLbTargetGroupHealthCheck(targetGroup)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting health_check: %s", err) - } - if _, ok := d.GetOk("port"); targetGroup.Port != nil || ok { d.Set("port", targetGroup.Port) } @@ -934,28 +936,6 @@ func findTargetGroupAttributesByARN(ctx context.Context, conn *elbv2.ELBV2, arn return output.Attributes, nil } -func validTargetGroupHealthCheckPath(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if !strings.HasPrefix(value, "/") { - errors = append(errors, fmt.Errorf( - "%q must begin with a '/' character, got %q", k, value)) - } - return -} - -func validateSlowStart(v interface{}, k string) (ws []string, errors []error) { - value := v.(int) - - // Check if the value is between 30-900 or 0 (seconds). - if value != 0 && !(value >= 30 && value <= 900) { - errors = append(errors, fmt.Errorf( - "%q contains an invalid Slow Start Duration \"%d\". "+ - "Valid intervals are 30-900 or 0 to disable.", - k, value)) - } - return -} - func validTargetGroupHealthCheckPort(v interface{}, k string) (ws []string, errors []error) { value := v.(string) @@ -989,6 +969,139 @@ func TargetGroupSuffixFromARN(arn *string) string { return "" } +func resourceTargetGroupCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, meta any) error { + healthCheck := make(map[string]any) + if healthChecks := diff.Get("health_check").([]interface{}); len(healthChecks) == 1 { + healthCheck = healthChecks[0].(map[string]interface{}) + } + + if p, ok := healthCheck["protocol"].(string); ok && strings.ToUpper(p) == elbv2.ProtocolEnumTcp { + if m := healthCheck["matcher"].(string); m != "" { + return fmt.Errorf("Attribute %q cannot be specified when %q is %q.", + "health_check.matcher", + "health_check.protocol", + elbv2.ProtocolEnumTcp, + ) + } + + if m := healthCheck["path"].(string); m != "" { + return fmt.Errorf("Attribute %q cannot be specified when %q is %q.", + "health_check.path", + "health_check.protocol", + elbv2.ProtocolEnumTcp, + ) + } + } + + protocol := diff.Get("protocol").(string) + + switch protocol { + case elbv2.ProtocolEnumHttp, elbv2.ProtocolEnumHttps: + if p, ok := healthCheck["protocol"].(string); ok && strings.ToUpper(p) == elbv2.ProtocolEnumTcp { + return fmt.Errorf("Attribute %q cannot have value %q when %q is %q.", + "health_check.protocol", + elbv2.ProtocolEnumTcp, + "protocol", + protocol, + ) + } + } + + if diff.Id() == "" { + return nil + } + + return nil +} + +func customizeDiffTargetGroupTargetTypeLambda(_ context.Context, diff *schema.ResourceDiff, meta any) error { + if diff.Get("target_type").(string) != elbv2.TargetTypeEnumLambda { + return nil + } + + if healthChecks := diff.Get("health_check").([]interface{}); len(healthChecks) == 1 { + healthCheck := healthChecks[0].(map[string]interface{}) + healthCheckProtocol := healthCheck["protocol"].(string) + + if healthCheckProtocol == elbv2.ProtocolEnumTcp { + return fmt.Errorf("Attribute %q cannot have value %q when %q is %q.", + 
"health_check.protocol", + elbv2.ProtocolEnumTcp, + "target_type", + elbv2.TargetTypeEnumLambda, + ) + } + } + + return nil +} + +func customizeDiffTargetGroupTargetTypeNotLambda(_ context.Context, diff *schema.ResourceDiff, meta any) error { + targetType := diff.Get("target_type").(string) + if targetType == elbv2.TargetTypeEnumLambda { + return nil + } + + config := diff.GetRawConfig() + + if v := config.GetAttr("port"); v.IsKnown() && v.IsNull() { + return fmt.Errorf("Attribute %q must be specified when %q is %q.", + "port", + "target_type", + targetType, + ) + } + + if v := config.GetAttr("protocol"); v.IsKnown() && v.IsNull() { + return fmt.Errorf("Attribute %q must be specified when %q is %q.", + "protocol", + "target_type", + targetType, + ) + } + + if v := config.GetAttr("vpc_id"); v.IsKnown() && v.IsNull() { + return fmt.Errorf("Attribute %q must be specified when %q is %q.", + "vpc_id", + "target_type", + targetType, + ) + } + + return nil +} + +func flattenTargetGroupHealthCheck(apiObject *elbv2.TargetGroup) []interface{} { + if apiObject == nil { + return []interface{}{} + } + + tfMap := map[string]interface{}{ + "enabled": aws.BoolValue(apiObject.HealthCheckEnabled), + "healthy_threshold": int(aws.Int64Value(apiObject.HealthyThresholdCount)), + "interval": int(aws.Int64Value(apiObject.HealthCheckIntervalSeconds)), + "port": aws.StringValue(apiObject.HealthCheckPort), + "protocol": aws.StringValue(apiObject.HealthCheckProtocol), + "timeout": int(aws.Int64Value(apiObject.HealthCheckTimeoutSeconds)), + "unhealthy_threshold": int(aws.Int64Value(apiObject.UnhealthyThresholdCount)), + } + + if v := apiObject.HealthCheckPath; v != nil { + tfMap["path"] = aws.StringValue(v) + } + + if apiObject := apiObject.Matcher; apiObject != nil { + if v := apiObject.HttpCode; v != nil { + tfMap["matcher"] = aws.StringValue(v) + } + if v := apiObject.GrpcCode; v != nil { + tfMap["matcher"] = aws.StringValue(v) + } + } + + return []interface{}{tfMap} +} + func expandTargetGroupStickinessAttributes(tfMap map[string]interface{}, protocol string) []*elbv2.TargetGroupAttribute { if tfMap == nil { return nil @@ -1150,136 +1263,6 @@ func flattenTargetGroupTargetHealthStateAttributes(apiObjects []*elbv2.TargetGro return tfMap } -func resourceTargetGroupCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, meta any) error { - healthCheck := make(map[string]any) - if healthChecks := diff.Get("health_check").([]interface{}); len(healthChecks) == 1 { - healthCheck = healthChecks[0].(map[string]interface{}) - } - - if p, ok := healthCheck["protocol"].(string); ok && strings.ToUpper(p) == elbv2.ProtocolEnumTcp { - if m := healthCheck["matcher"].(string); m != "" { - return fmt.Errorf("Attribute %q cannot be specified when %q is %q.", - "health_check.matcher", - "health_check.protocol", - elbv2.ProtocolEnumTcp, - ) - } - - if m := healthCheck["path"].(string); m != "" { - return fmt.Errorf("Attribute %q cannot be specified when %q is %q.", - "health_check.path", - "health_check.protocol", - elbv2.ProtocolEnumTcp, - ) - } - } - - protocol := diff.Get("protocol").(string) - - switch protocol { - case elbv2.ProtocolEnumHttp, elbv2.ProtocolEnumHttps: - if p, ok := healthCheck["protocol"].(string); ok && strings.ToUpper(p) == elbv2.ProtocolEnumTcp { - return fmt.Errorf("Attribute %q cannot have value %q when %q is %q.", - "health_check.protocol", - elbv2.ProtocolEnumTcp, - "protocol", - protocol, - ) - } - } - - if diff.Id() == "" { - return nil - } - - return nil -} - -func 
lambdaTargetHealthCheckProtocolCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, meta any) error { - if diff.Get("target_type").(string) != elbv2.TargetTypeEnumLambda { - return nil - } - - if healthChecks := diff.Get("health_check").([]interface{}); len(healthChecks) == 1 { - healthCheck := healthChecks[0].(map[string]interface{}) - healthCheckProtocol := healthCheck["protocol"].(string) - - if healthCheckProtocol == elbv2.ProtocolEnumTcp { - return fmt.Errorf("Attribute %q cannot have value %q when %q is %q.", - "health_check.protocol", - elbv2.ProtocolEnumTcp, - "target_type", - elbv2.TargetTypeEnumLambda, - ) - } - } - - return nil -} - -func nonLambdaValidationCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, meta any) error { - targetType := diff.Get("target_type").(string) - if targetType == elbv2.TargetTypeEnumLambda { - return nil - } - - config := diff.GetRawConfig() - - if v := config.GetAttr("port"); v.IsKnown() && v.IsNull() { - return fmt.Errorf("Attribute %q must be specified when %q is %q.", - "port", - "target_type", - targetType, - ) - } - - if v := config.GetAttr("protocol"); v.IsKnown() && v.IsNull() { - return fmt.Errorf("Attribute %q must be specified when %q is %q.", - "protocol", - "target_type", - targetType, - ) - } - - if v := config.GetAttr("vpc_id"); v.IsKnown() && v.IsNull() { - return fmt.Errorf("Attribute %q must be specified when %q is %q.", - "vpc_id", - "target_type", - targetType, - ) - } - - return nil -} - -func flattenLbTargetGroupHealthCheck(targetGroup *elbv2.TargetGroup) []interface{} { - if targetGroup == nil { - return []interface{}{} - } - - m := map[string]interface{}{ - "enabled": aws.BoolValue(targetGroup.HealthCheckEnabled), - "healthy_threshold": int(aws.Int64Value(targetGroup.HealthyThresholdCount)), - "interval": int(aws.Int64Value(targetGroup.HealthCheckIntervalSeconds)), - "port": aws.StringValue(targetGroup.HealthCheckPort), - "protocol": aws.StringValue(targetGroup.HealthCheckProtocol), - "timeout": int(aws.Int64Value(targetGroup.HealthCheckTimeoutSeconds)), - "unhealthy_threshold": int(aws.Int64Value(targetGroup.UnhealthyThresholdCount)), - } - - if targetGroup.HealthCheckPath != nil { - m["path"] = aws.StringValue(targetGroup.HealthCheckPath) - } - if targetGroup.Matcher != nil && targetGroup.Matcher.HttpCode != nil { - m["matcher"] = aws.StringValue(targetGroup.Matcher.HttpCode) - } - if targetGroup.Matcher != nil && targetGroup.Matcher.GrpcCode != nil { - m["matcher"] = aws.StringValue(targetGroup.Matcher.GrpcCode) - } - - return []interface{}{m} -} - func pathString(path cty.Path) string { var buf strings.Builder for i, step := range path { diff --git a/internal/service/elbv2/target_group_data_source.go b/internal/service/elbv2/target_group_data_source.go index 85b4dd1b326..86b78ca2ebb 100644 --- a/internal/service/elbv2/target_group_data_source.go +++ b/internal/service/elbv2/target_group_data_source.go @@ -6,7 +6,6 @@ package elbv2 import ( "context" "log" - "strconv" "time" "github.com/aws/aws-sdk-go/aws" @@ -220,20 +219,18 @@ func dataSourceTargetGroupRead(ctx context.Context, d *schema.ResourceData, meta } targetGroup := results[0] - d.SetId(aws.StringValue(targetGroup.TargetGroupArn)) - d.Set("arn", targetGroup.TargetGroupArn) d.Set("arn_suffix", TargetGroupSuffixFromARN(targetGroup.TargetGroupArn)) - d.Set("name", targetGroup.TargetGroupName) - d.Set("target_type", targetGroup.TargetType) - - if err := d.Set("health_check", flattenLbTargetGroupHealthCheck(targetGroup)); err != nil { + if err := 
d.Set("health_check", flattenTargetGroupHealthCheck(targetGroup)); err != nil { return sdkdiag.AppendErrorf(diags, "setting health_check: %s", err) } + d.Set("name", targetGroup.TargetGroupName) + targetType := aws.StringValue(targetGroup.TargetType) + d.Set("target_type", targetType) var protocol string - if v, _ := d.Get("target_type").(string); v != elbv2.TargetTypeEnumLambda { + if targetType != elbv2.TargetTypeEnumLambda { d.Set("port", targetGroup.Port) protocol = aws.StringValue(targetGroup.Protocol) d.Set("protocol", protocol) @@ -250,57 +247,12 @@ func dataSourceTargetGroupRead(ctx context.Context, d *schema.ResourceData, meta return sdkdiag.AppendErrorf(diags, "reading ELBv2 Target Group (%s) attributes: %s", d.Id(), err) } - for _, attr := range attributes { - switch aws.StringValue(attr.Key) { - case "deregistration_delay.connection_termination.enabled": - enabled, err := strconv.ParseBool(aws.StringValue(attr.Value)) - if err != nil { - return sdkdiag.AppendErrorf(diags, "converting deregistration_delay.connection_termination.enabled to bool: %s", aws.StringValue(attr.Value)) - } - d.Set("connection_termination", enabled) - case "deregistration_delay.timeout_seconds": - timeout, err := strconv.Atoi(aws.StringValue(attr.Value)) - if err != nil { - return sdkdiag.AppendErrorf(diags, "converting deregistration_delay.timeout_seconds to int: %s", aws.StringValue(attr.Value)) - } - d.Set("deregistration_delay", timeout) - case "lambda.multi_value_headers.enabled": - enabled, err := strconv.ParseBool(aws.StringValue(attr.Value)) - if err != nil { - return sdkdiag.AppendErrorf(diags, "converting lambda.multi_value_headers.enabled to bool: %s", aws.StringValue(attr.Value)) - } - d.Set("lambda_multi_value_headers_enabled", enabled) - case "proxy_protocol_v2.enabled": - enabled, err := strconv.ParseBool(aws.StringValue(attr.Value)) - if err != nil { - return sdkdiag.AppendErrorf(diags, "converting proxy_protocol_v2.enabled to bool: %s", aws.StringValue(attr.Value)) - } - d.Set("proxy_protocol_v2", enabled) - case "slow_start.duration_seconds": - slowStart, err := strconv.Atoi(aws.StringValue(attr.Value)) - if err != nil { - return sdkdiag.AppendErrorf(diags, "converting slow_start.duration_seconds to int: %s", aws.StringValue(attr.Value)) - } - d.Set("slow_start", slowStart) - case "load_balancing.algorithm.type": - loadBalancingAlgorithm := aws.StringValue(attr.Value) - d.Set("load_balancing_algorithm_type", loadBalancingAlgorithm) - case "load_balancing.cross_zone.enabled": - loadBalancingCrossZoneEnabled := aws.StringValue(attr.Value) - d.Set("load_balancing_cross_zone_enabled", loadBalancingCrossZoneEnabled) - case "preserve_client_ip.enabled": - _, err := strconv.ParseBool(aws.StringValue(attr.Value)) - if err != nil { - return sdkdiag.AppendErrorf(diags, "converting preserve_client_ip.enabled to bool: %s", aws.StringValue(attr.Value)) - } - d.Set("preserve_client_ip", attr.Value) - } - } - if err := d.Set("stickiness", []interface{}{flattenTargetGroupStickinessAttributes(attributes, protocol)}); err != nil { return sdkdiag.AppendErrorf(diags, "setting stickiness: %s", err) } + targetGroupAttributes.flatten(d, targetType, attributes) + tags, err := listTags(ctx, conn, d.Id()) if errs.IsUnsupportedOperationInPartitionError(conn.PartitionID, err) { From fddd2056de203818eb9c56d1282cafde07bd24fe Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 15 Dec 2023 13:53:23 -0500 Subject: [PATCH 111/123] d/aws_lb_target_group: Change `deregistration_delay` from `TypeInt` to `TypeString`. 
--- .changelog/31436.txt | 4 ++++ internal/service/elbv2/target_group_data_source.go | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/.changelog/31436.txt b/.changelog/31436.txt index cd5e71e36c0..f9bc005b5ff 100644 --- a/.changelog/31436.txt +++ b/.changelog/31436.txt @@ -1,3 +1,7 @@ ```release-note:bug resource/aws_lb_target_group: Fix diff on `stickiness.cookie_name` when `stickiness.type` is `lb_cookie` +``` + +```release-note:bug +data-source/aws_lb_target_group: Change `deregistration_delay` from `TypeInt` to `TypeString` ``` \ No newline at end of file diff --git a/internal/service/elbv2/target_group_data_source.go b/internal/service/elbv2/target_group_data_source.go index 86b78ca2ebb..f4fc4b88208 100644 --- a/internal/service/elbv2/target_group_data_source.go +++ b/internal/service/elbv2/target_group_data_source.go @@ -44,7 +44,7 @@ func DataSourceTargetGroup() *schema.Resource { Computed: true, }, "deregistration_delay": { - Type: schema.TypeInt, + Type: schema.TypeString, Computed: true, }, "health_check": { From cd2603a848baf0e1d9a65e182aa07683f4c222e5 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Fri, 15 Dec 2023 14:12:43 -0500 Subject: [PATCH 112/123] autoflex: Allow set block map keys --- internal/framework/flex/auto_expand_test.go | 30 +++++++++ internal/framework/flex/auto_flatten.go | 69 ++++++++++++++++++++ internal/framework/flex/auto_flatten_test.go | 21 ++++++ internal/framework/flex/autoflex_test.go | 4 ++ 4 files changed, 124 insertions(+) diff --git a/internal/framework/flex/auto_expand_test.go b/internal/framework/flex/auto_expand_test.go index 4781197f00f..b001c0ea557 100644 --- a/internal/framework/flex/auto_expand_test.go +++ b/internal/framework/flex/auto_expand_test.go @@ -605,6 +605,36 @@ func TestExpandGeneric(t *testing.T) { }, }, }, + { + TestName: "block set key map", + Source: &TestFlexBlockKeyMapTF03{ + BlockMap: fwtypes.NewSetNestedObjectValueOfValueSlice[TestFlexBlockKeyMapTF02](ctx, []TestFlexBlockKeyMapTF02{ + { + TFBlockKeyMap: types.StringValue("x"), + Attr1: types.StringValue("a"), + Attr2: types.StringValue("b"), + }, + { + TFBlockKeyMap: types.StringValue("y"), + Attr1: types.StringValue("c"), + Attr2: types.StringValue("d"), + }, + }), + }, + Target: &TestFlexBlockKeyMapAWS01{}, + WantTarget: &TestFlexBlockKeyMapAWS01{ + BlockMap: map[string]TestFlexBlockKeyMapAWS02{ + "x": { + Attr1: "a", + Attr2: "b", + }, + "y": { + Attr1: "c", + Attr2: "d", + }, + }, + }, + }, { TestName: "block key map ptr source", Source: &TestFlexBlockKeyMapTF01{ diff --git a/internal/framework/flex/auto_flatten.go b/internal/framework/flex/auto_flatten.go index fd118d06cb2..3b0723becdf 100644 --- a/internal/framework/flex/auto_flatten.go +++ b/internal/framework/flex/auto_flatten.go @@ -508,6 +508,14 @@ func (flattener autoFlattener) map_(ctx context.Context, vFrom reflect.Value, tT switch tMapElem := vFrom.Type().Elem(); tMapElem.Kind() { case reflect.Struct: switch tTo := tTo.(type) { + case basetypes.SetTypable: + // + // map[string]struct -> fwtypes.ListNestedObjectOf[Object] + // + if tTo, ok := tTo.(fwtypes.NestedObjectType); ok { + diags.Append(flattener.structMapToObjectList(ctx, vFrom, tTo, vTo)...) 
+ return diags + } case basetypes.ListTypable: // // map[string]struct -> fwtypes.ListNestedObjectOf[Object] @@ -751,6 +759,67 @@ func (flattener autoFlattener) structMapToObjectList(ctx context.Context, vFrom return diags } +/* +func (flattener autoFlattener) structMapToObjectSet(ctx context.Context, vFrom reflect.Value, tTo fwtypes.NestedObjectType, vTo reflect.Value) diag.Diagnostics { + var diags diag.Diagnostics + + if vFrom.IsNil() { + val, d := tTo.NullValue(ctx) + diags.Append(d...) + if diags.HasError() { + return diags + } + + vTo.Set(reflect.ValueOf(val)) + return diags + } + + n := vFrom.Len() + to, d := tTo.NewObjectSlice(ctx, n, n) + diags.Append(d...) + if diags.HasError() { + return diags + } + + t := reflect.ValueOf(to) + + i := 0 + for _, key := range vFrom.MapKeys() { + target, d := tTo.NewObjectPtr(ctx) + diags.Append(d...) + if diags.HasError() { + return diags + } + + fromInterface := vFrom.MapIndex(key).Interface() + if vFrom.MapIndex(key).Kind() == reflect.Ptr { + fromInterface = vFrom.MapIndex(key).Elem().Interface() + } + + diags.Append(autoFlexConvertStruct(ctx, fromInterface, target, flattener)...) + if diags.HasError() { + return diags + } + + d = blockKeyMapSet(target, key.String()) + diags.Append(d...) + + t.Index(i).Set(reflect.ValueOf(target)) + i++ + } + + val, d := tTo.ValueFromObjectSlice(ctx, to) + diags.Append(d...) + if diags.HasError() { + return diags + } + + vTo.Set(reflect.ValueOf(val)) + + return diags +} +*/ + // structToNestedObject copies an AWS API struct value to a compatible Plugin Framework NestedObjectValue value. func (flattener autoFlattener) structToNestedObject(ctx context.Context, vFrom reflect.Value, isNullFrom bool, tTo fwtypes.NestedObjectType, vTo reflect.Value) diag.Diagnostics { var diags diag.Diagnostics diff --git a/internal/framework/flex/auto_flatten_test.go b/internal/framework/flex/auto_flatten_test.go index ed98e88250e..fbd6da59844 100644 --- a/internal/framework/flex/auto_flatten_test.go +++ b/internal/framework/flex/auto_flatten_test.go @@ -770,6 +770,27 @@ func TestFlattenGeneric(t *testing.T) { }), }, }, + { + TestName: "block key set map", + Source: &TestFlexBlockKeyMapAWS01{ + BlockMap: map[string]TestFlexBlockKeyMapAWS02{ + "x": { + Attr1: "a", + Attr2: "b", + }, + }, + }, + Target: &TestFlexBlockKeyMapTF03{}, + WantTarget: &TestFlexBlockKeyMapTF03{ + BlockMap: fwtypes.NewSetNestedObjectValueOfValueSlice[TestFlexBlockKeyMapTF02](ctx, []TestFlexBlockKeyMapTF02{ + { + TFBlockKeyMap: types.StringValue("x"), + Attr1: types.StringValue("a"), + Attr2: types.StringValue("b"), + }, + }), + }, + }, { TestName: "block key map ptr source", Source: &TestFlexBlockKeyMapAWS03{ diff --git a/internal/framework/flex/autoflex_test.go b/internal/framework/flex/autoflex_test.go index 491874db027..6ae32a5ed8d 100644 --- a/internal/framework/flex/autoflex_test.go +++ b/internal/framework/flex/autoflex_test.go @@ -308,6 +308,10 @@ type TestFlexBlockKeyMapTF02 struct { Attr2 types.String `tfsdk:"attr2"` } +type TestFlexBlockKeyMapTF03 struct { + BlockMap fwtypes.SetNestedObjectValueOf[TestFlexBlockKeyMapTF02] `tfsdk:"block_map"` +} + type TestFlexBlockKeyMapAWS01 struct { BlockMap map[string]TestFlexBlockKeyMapAWS02 } From 80904fe84382d3ee3dcc25ab6b5505b2ac4b96d4 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Fri, 15 Dec 2023 14:54:38 -0500 Subject: [PATCH 113/123] autoflex: Rename map --- internal/framework/flex/auto_expand.go | 4 +- internal/framework/flex/auto_expand_test.go | 96 ++++++++++---------- 
internal/framework/flex/auto_flatten.go | 4 +- internal/framework/flex/auto_flatten_test.go | 72 +++++++-------- internal/framework/flex/autoflex.go | 4 +- internal/framework/flex/autoflex_test.go | 26 +++--- 6 files changed, 103 insertions(+), 103 deletions(-) diff --git a/internal/framework/flex/auto_expand.go b/internal/framework/flex/auto_expand.go index cfb464dacaf..ad2f799470e 100644 --- a/internal/framework/flex/auto_expand.go +++ b/internal/framework/flex/auto_expand.go @@ -762,7 +762,7 @@ func blockKeyMap(from any) (reflect.Value, diag.Diagnostics) { } // go from StringValue to string - if field.Name == BlockKeyMap { + if field.Name == MapBlockKey { if v, ok := valFrom.Field(i).Interface().(basetypes.StringValue); ok { return reflect.ValueOf(v.ValueString()), diags } @@ -770,7 +770,7 @@ func blockKeyMap(from any) (reflect.Value, diag.Diagnostics) { } } - diags.AddError("AutoFlEx", fmt.Sprintf("unable to find map block key (%s)", BlockKeyMap)) + diags.AddError("AutoFlEx", fmt.Sprintf("unable to find map block key (%s)", MapBlockKey)) return reflect.Zero(reflect.TypeOf("")), diags } diff --git a/internal/framework/flex/auto_expand_test.go b/internal/framework/flex/auto_expand_test.go index b001c0ea557..b6ae8721391 100644 --- a/internal/framework/flex/auto_expand_test.go +++ b/internal/framework/flex/auto_expand_test.go @@ -576,24 +576,24 @@ func TestExpandGeneric(t *testing.T) { }, }, { - TestName: "block key map", - Source: &TestFlexBlockKeyMapTF01{ - BlockMap: fwtypes.NewListNestedObjectValueOfValueSlice[TestFlexBlockKeyMapTF02](ctx, []TestFlexBlockKeyMapTF02{ + TestName: "map block key list", + Source: &TestFlexMapBlockKeyTF01{ + BlockMap: fwtypes.NewListNestedObjectValueOfValueSlice[TestFlexMapBlockKeyTF02](ctx, []TestFlexMapBlockKeyTF02{ { - TFBlockKeyMap: types.StringValue("x"), - Attr1: types.StringValue("a"), - Attr2: types.StringValue("b"), + MapBlockKey: types.StringValue("x"), + Attr1: types.StringValue("a"), + Attr2: types.StringValue("b"), }, { - TFBlockKeyMap: types.StringValue("y"), - Attr1: types.StringValue("c"), - Attr2: types.StringValue("d"), + MapBlockKey: types.StringValue("y"), + Attr1: types.StringValue("c"), + Attr2: types.StringValue("d"), }, }), }, - Target: &TestFlexBlockKeyMapAWS01{}, - WantTarget: &TestFlexBlockKeyMapAWS01{ - BlockMap: map[string]TestFlexBlockKeyMapAWS02{ + Target: &TestFlexMapBlockKeyAWS01{}, + WantTarget: &TestFlexMapBlockKeyAWS01{ + BlockMap: map[string]TestFlexMapBlockKeyAWS02{ "x": { Attr1: "a", Attr2: "b", @@ -606,24 +606,24 @@ func TestExpandGeneric(t *testing.T) { }, }, { - TestName: "block set key map", - Source: &TestFlexBlockKeyMapTF03{ - BlockMap: fwtypes.NewSetNestedObjectValueOfValueSlice[TestFlexBlockKeyMapTF02](ctx, []TestFlexBlockKeyMapTF02{ + TestName: "map block key set", + Source: &TestFlexMapBlockKeyTF03{ + BlockMap: fwtypes.NewSetNestedObjectValueOfValueSlice[TestFlexMapBlockKeyTF02](ctx, []TestFlexMapBlockKeyTF02{ { - TFBlockKeyMap: types.StringValue("x"), - Attr1: types.StringValue("a"), - Attr2: types.StringValue("b"), + MapBlockKey: types.StringValue("x"), + Attr1: types.StringValue("a"), + Attr2: types.StringValue("b"), }, { - TFBlockKeyMap: types.StringValue("y"), - Attr1: types.StringValue("c"), - Attr2: types.StringValue("d"), + MapBlockKey: types.StringValue("y"), + Attr1: types.StringValue("c"), + Attr2: types.StringValue("d"), }, }), }, - Target: &TestFlexBlockKeyMapAWS01{}, - WantTarget: &TestFlexBlockKeyMapAWS01{ - BlockMap: map[string]TestFlexBlockKeyMapAWS02{ + Target: &TestFlexMapBlockKeyAWS01{}, + 
WantTarget: &TestFlexMapBlockKeyAWS01{ + BlockMap: map[string]TestFlexMapBlockKeyAWS02{ "x": { Attr1: "a", Attr2: "b", @@ -636,24 +636,24 @@ func TestExpandGeneric(t *testing.T) { }, }, { - TestName: "block key map ptr source", - Source: &TestFlexBlockKeyMapTF01{ - BlockMap: fwtypes.NewListNestedObjectValueOfSlice(ctx, []*TestFlexBlockKeyMapTF02{ + TestName: "map block key ptr source", + Source: &TestFlexMapBlockKeyTF01{ + BlockMap: fwtypes.NewListNestedObjectValueOfSlice(ctx, []*TestFlexMapBlockKeyTF02{ { - TFBlockKeyMap: types.StringValue("x"), - Attr1: types.StringValue("a"), - Attr2: types.StringValue("b"), + MapBlockKey: types.StringValue("x"), + Attr1: types.StringValue("a"), + Attr2: types.StringValue("b"), }, { - TFBlockKeyMap: types.StringValue("y"), - Attr1: types.StringValue("c"), - Attr2: types.StringValue("d"), + MapBlockKey: types.StringValue("y"), + Attr1: types.StringValue("c"), + Attr2: types.StringValue("d"), }, }), }, - Target: &TestFlexBlockKeyMapAWS01{}, - WantTarget: &TestFlexBlockKeyMapAWS01{ - BlockMap: map[string]TestFlexBlockKeyMapAWS02{ + Target: &TestFlexMapBlockKeyAWS01{}, + WantTarget: &TestFlexMapBlockKeyAWS01{ + BlockMap: map[string]TestFlexMapBlockKeyAWS02{ "x": { Attr1: "a", Attr2: "b", @@ -666,24 +666,24 @@ func TestExpandGeneric(t *testing.T) { }, }, { - TestName: "block key map ptr both", - Source: &TestFlexBlockKeyMapTF01{ - BlockMap: fwtypes.NewListNestedObjectValueOfSlice(ctx, []*TestFlexBlockKeyMapTF02{ + TestName: "map block key ptr both", + Source: &TestFlexMapBlockKeyTF01{ + BlockMap: fwtypes.NewListNestedObjectValueOfSlice(ctx, []*TestFlexMapBlockKeyTF02{ { - TFBlockKeyMap: types.StringValue("x"), - Attr1: types.StringValue("a"), - Attr2: types.StringValue("b"), + MapBlockKey: types.StringValue("x"), + Attr1: types.StringValue("a"), + Attr2: types.StringValue("b"), }, { - TFBlockKeyMap: types.StringValue("y"), - Attr1: types.StringValue("c"), - Attr2: types.StringValue("d"), + MapBlockKey: types.StringValue("y"), + Attr1: types.StringValue("c"), + Attr2: types.StringValue("d"), }, }), }, - Target: &TestFlexBlockKeyMapAWS03{}, - WantTarget: &TestFlexBlockKeyMapAWS03{ - BlockMap: map[string]*TestFlexBlockKeyMapAWS02{ + Target: &TestFlexMapBlockKeyAWS03{}, + WantTarget: &TestFlexMapBlockKeyAWS03{ + BlockMap: map[string]*TestFlexMapBlockKeyAWS02{ "x": { Attr1: "a", Attr2: "b", diff --git a/internal/framework/flex/auto_flatten.go b/internal/framework/flex/auto_flatten.go index 3b0723becdf..f29ee88ccc6 100644 --- a/internal/framework/flex/auto_flatten.go +++ b/internal/framework/flex/auto_flatten.go @@ -928,7 +928,7 @@ func blockKeyMapSet(to any, key string) diag.Diagnostics { continue // Skip unexported fields. 
} - if field.Name != BlockKeyMap { + if field.Name != MapBlockKey { continue } @@ -940,7 +940,7 @@ func blockKeyMapSet(to any, key string) diag.Diagnostics { return diags } - diags.AddError("AutoFlEx", fmt.Sprintf("unable to find map block key (%s)", BlockKeyMap)) + diags.AddError("AutoFlEx", fmt.Sprintf("unable to find map block key (%s)", MapBlockKey)) return diags } diff --git a/internal/framework/flex/auto_flatten_test.go b/internal/framework/flex/auto_flatten_test.go index fbd6da59844..69fc1151d4a 100644 --- a/internal/framework/flex/auto_flatten_test.go +++ b/internal/framework/flex/auto_flatten_test.go @@ -750,85 +750,85 @@ func TestFlattenGeneric(t *testing.T) { }, }, { - TestName: "block key map", - Source: &TestFlexBlockKeyMapAWS01{ - BlockMap: map[string]TestFlexBlockKeyMapAWS02{ + TestName: "map block key list", + Source: &TestFlexMapBlockKeyAWS01{ + BlockMap: map[string]TestFlexMapBlockKeyAWS02{ "x": { Attr1: "a", Attr2: "b", }, }, }, - Target: &TestFlexBlockKeyMapTF01{}, - WantTarget: &TestFlexBlockKeyMapTF01{ - BlockMap: fwtypes.NewListNestedObjectValueOfValueSlice[TestFlexBlockKeyMapTF02](ctx, []TestFlexBlockKeyMapTF02{ + Target: &TestFlexMapBlockKeyTF01{}, + WantTarget: &TestFlexMapBlockKeyTF01{ + BlockMap: fwtypes.NewListNestedObjectValueOfValueSlice[TestFlexMapBlockKeyTF02](ctx, []TestFlexMapBlockKeyTF02{ { - TFBlockKeyMap: types.StringValue("x"), - Attr1: types.StringValue("a"), - Attr2: types.StringValue("b"), + MapBlockKey: types.StringValue("x"), + Attr1: types.StringValue("a"), + Attr2: types.StringValue("b"), }, }), }, }, { - TestName: "block key set map", - Source: &TestFlexBlockKeyMapAWS01{ - BlockMap: map[string]TestFlexBlockKeyMapAWS02{ + TestName: "map block key set", + Source: &TestFlexMapBlockKeyAWS01{ + BlockMap: map[string]TestFlexMapBlockKeyAWS02{ "x": { Attr1: "a", Attr2: "b", }, }, }, - Target: &TestFlexBlockKeyMapTF03{}, - WantTarget: &TestFlexBlockKeyMapTF03{ - BlockMap: fwtypes.NewSetNestedObjectValueOfValueSlice[TestFlexBlockKeyMapTF02](ctx, []TestFlexBlockKeyMapTF02{ + Target: &TestFlexMapBlockKeyTF03{}, + WantTarget: &TestFlexMapBlockKeyTF03{ + BlockMap: fwtypes.NewSetNestedObjectValueOfValueSlice[TestFlexMapBlockKeyTF02](ctx, []TestFlexMapBlockKeyTF02{ { - TFBlockKeyMap: types.StringValue("x"), - Attr1: types.StringValue("a"), - Attr2: types.StringValue("b"), + MapBlockKey: types.StringValue("x"), + Attr1: types.StringValue("a"), + Attr2: types.StringValue("b"), }, }), }, }, { - TestName: "block key map ptr source", - Source: &TestFlexBlockKeyMapAWS03{ - BlockMap: map[string]*TestFlexBlockKeyMapAWS02{ + TestName: "map block key ptr source", + Source: &TestFlexMapBlockKeyAWS03{ + BlockMap: map[string]*TestFlexMapBlockKeyAWS02{ "x": { Attr1: "a", Attr2: "b", }, }, }, - Target: &TestFlexBlockKeyMapTF01{}, - WantTarget: &TestFlexBlockKeyMapTF01{ - BlockMap: fwtypes.NewListNestedObjectValueOfValueSlice[TestFlexBlockKeyMapTF02](ctx, []TestFlexBlockKeyMapTF02{ + Target: &TestFlexMapBlockKeyTF01{}, + WantTarget: &TestFlexMapBlockKeyTF01{ + BlockMap: fwtypes.NewListNestedObjectValueOfValueSlice[TestFlexMapBlockKeyTF02](ctx, []TestFlexMapBlockKeyTF02{ { - TFBlockKeyMap: types.StringValue("x"), - Attr1: types.StringValue("a"), - Attr2: types.StringValue("b"), + MapBlockKey: types.StringValue("x"), + Attr1: types.StringValue("a"), + Attr2: types.StringValue("b"), }, }), }, }, { - TestName: "block key map ptr both", - Source: &TestFlexBlockKeyMapAWS03{ - BlockMap: map[string]*TestFlexBlockKeyMapAWS02{ + TestName: "map block key ptr both", + Source: 
&TestFlexMapBlockKeyAWS03{ + BlockMap: map[string]*TestFlexMapBlockKeyAWS02{ "x": { Attr1: "a", Attr2: "b", }, }, }, - Target: &TestFlexBlockKeyMapTF01{}, - WantTarget: &TestFlexBlockKeyMapTF01{ - BlockMap: fwtypes.NewListNestedObjectValueOfSlice(ctx, []*TestFlexBlockKeyMapTF02{ + Target: &TestFlexMapBlockKeyTF01{}, + WantTarget: &TestFlexMapBlockKeyTF01{ + BlockMap: fwtypes.NewListNestedObjectValueOfSlice(ctx, []*TestFlexMapBlockKeyTF02{ { - TFBlockKeyMap: types.StringValue("x"), - Attr1: types.StringValue("a"), - Attr2: types.StringValue("b"), + MapBlockKey: types.StringValue("x"), + Attr1: types.StringValue("a"), + Attr2: types.StringValue("b"), }, }), }, diff --git a/internal/framework/flex/autoflex.go b/internal/framework/flex/autoflex.go index fda37eb9b05..ace1d4ecf52 100644 --- a/internal/framework/flex/autoflex.go +++ b/internal/framework/flex/autoflex.go @@ -18,7 +18,7 @@ type ResourcePrefixCtxKey string const ( ResourcePrefix ResourcePrefixCtxKey = "RESOURCE_PREFIX" ResourcePrefixRecurse ResourcePrefixCtxKey = "RESOURCE_PREFIX_RECURSE" - BlockKeyMap = "TFBlockKeyMap" + MapBlockKey = "MapBlockKey" ) // Expand = TF --> AWS @@ -95,7 +95,7 @@ func autoFlexConvertStruct(ctx context.Context, from any, to any, flexer autoFle if fieldName == "Tags" { continue // Resource tags are handled separately. } - if fieldName == BlockKeyMap { + if fieldName == MapBlockKey { continue } diff --git a/internal/framework/flex/autoflex_test.go b/internal/framework/flex/autoflex_test.go index 6ae32a5ed8d..b30fac0c715 100644 --- a/internal/framework/flex/autoflex_test.go +++ b/internal/framework/flex/autoflex_test.go @@ -298,29 +298,29 @@ type TestFlexTF18 struct { Field6 fwtypes.MapValueOf[types.String] `tfsdk:"field6"` } -type TestFlexBlockKeyMapTF01 struct { - BlockMap fwtypes.ListNestedObjectValueOf[TestFlexBlockKeyMapTF02] `tfsdk:"block_map"` +type TestFlexMapBlockKeyTF01 struct { + BlockMap fwtypes.ListNestedObjectValueOf[TestFlexMapBlockKeyTF02] `tfsdk:"block_map"` } -type TestFlexBlockKeyMapTF02 struct { - TFBlockKeyMap types.String `tfsdk:"block_key_map"` - Attr1 types.String `tfsdk:"attr1"` - Attr2 types.String `tfsdk:"attr2"` +type TestFlexMapBlockKeyTF02 struct { + MapBlockKey types.String `tfsdk:"map_block_key"` + Attr1 types.String `tfsdk:"attr1"` + Attr2 types.String `tfsdk:"attr2"` } -type TestFlexBlockKeyMapTF03 struct { - BlockMap fwtypes.SetNestedObjectValueOf[TestFlexBlockKeyMapTF02] `tfsdk:"block_map"` +type TestFlexMapBlockKeyTF03 struct { + BlockMap fwtypes.SetNestedObjectValueOf[TestFlexMapBlockKeyTF02] `tfsdk:"block_map"` } -type TestFlexBlockKeyMapAWS01 struct { - BlockMap map[string]TestFlexBlockKeyMapAWS02 +type TestFlexMapBlockKeyAWS01 struct { + BlockMap map[string]TestFlexMapBlockKeyAWS02 } -type TestFlexBlockKeyMapAWS02 struct { +type TestFlexMapBlockKeyAWS02 struct { Attr1 string Attr2 string } -type TestFlexBlockKeyMapAWS03 struct { - BlockMap map[string]*TestFlexBlockKeyMapAWS02 +type TestFlexMapBlockKeyAWS03 struct { + BlockMap map[string]*TestFlexMapBlockKeyAWS02 } From cb79548414329a4d88755a7be85beb225b5ab7e3 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 15 Dec 2023 15:29:43 -0500 Subject: [PATCH 114/123] r/aws_lb_target_group_attachment: Alphabetize attributes. 
--- .../service/elbv2/target_group_attachment.go | 71 +++++++++---------- 1 file changed, 34 insertions(+), 37 deletions(-) diff --git a/internal/service/elbv2/target_group_attachment.go b/internal/service/elbv2/target_group_attachment.go index 70433ac3a2b..13790803de9 100644 --- a/internal/service/elbv2/target_group_attachment.go +++ b/internal/service/elbv2/target_group_attachment.go @@ -30,29 +30,26 @@ func ResourceTargetGroupAttachment() *schema.Resource { DeleteWithoutTimeout: resourceAttachmentDelete, Schema: map[string]*schema.Schema{ + "availability_zone": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, "target_group_arn": { Type: schema.TypeString, ForceNew: true, Required: true, }, - "target_id": { Type: schema.TypeString, ForceNew: true, Required: true, }, - "port": { Type: schema.TypeInt, ForceNew: true, Optional: true, }, - - "availability_zone": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - }, }, } } @@ -107,35 +104,6 @@ func resourceAttachmentCreate(ctx context.Context, d *schema.ResourceData, meta return diags } -func resourceAttachmentDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBV2Conn(ctx) - - target := &elbv2.TargetDescription{ - Id: aws.String(d.Get("target_id").(string)), - } - - if v, ok := d.GetOk("port"); ok { - target.Port = aws.Int64(int64(v.(int))) - } - - if v, ok := d.GetOk("availability_zone"); ok { - target.AvailabilityZone = aws.String(v.(string)) - } - - params := &elbv2.DeregisterTargetsInput{ - TargetGroupArn: aws.String(d.Get("target_group_arn").(string)), - Targets: []*elbv2.TargetDescription{target}, - } - - _, err := conn.DeregisterTargetsWithContext(ctx, params) - if err != nil && !tfawserr.ErrCodeEquals(err, elbv2.ErrCodeTargetGroupNotFoundException) { - return sdkdiag.AppendErrorf(diags, "deregistering Targets: %s", err) - } - - return diags -} - // resourceAttachmentRead requires all of the fields in order to describe the correct // target, so there is no work to do beyond ensuring that the target and group still exist. func resourceAttachmentRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { @@ -204,3 +172,32 @@ func resourceAttachmentRead(ctx context.Context, d *schema.ResourceData, meta in return diags } + +func resourceAttachmentDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).ELBV2Conn(ctx) + + target := &elbv2.TargetDescription{ + Id: aws.String(d.Get("target_id").(string)), + } + + if v, ok := d.GetOk("port"); ok { + target.Port = aws.Int64(int64(v.(int))) + } + + if v, ok := d.GetOk("availability_zone"); ok { + target.AvailabilityZone = aws.String(v.(string)) + } + + params := &elbv2.DeregisterTargetsInput{ + TargetGroupArn: aws.String(d.Get("target_group_arn").(string)), + Targets: []*elbv2.TargetDescription{target}, + } + + _, err := conn.DeregisterTargetsWithContext(ctx, params) + if err != nil && !tfawserr.ErrCodeEquals(err, elbv2.ErrCodeTargetGroupNotFoundException) { + return sdkdiag.AppendErrorf(diags, "deregistering Targets: %s", err) + } + + return diags +} From b053503658aa426acae4b125b750b57281e96fae Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 15 Dec 2023 15:35:00 -0500 Subject: [PATCH 115/123] r/aws_lb_target_group_attachment: Tidy up Create. 
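The hand-rolled `retry.RetryContext` loop is replaced by the provider's retry-while-error-code helper (`tfresource.RetryWhenAWSErrCodeEquals`). A simplified, standard-library-only sketch of that pattern; the function, error, and timing choices below are illustrative rather than taken from the helper itself:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// retryWhen keeps calling fn until it succeeds, the error is no longer
// retryable, or the timeout elapses. It is a simplified analogue of the
// helper the create path now delegates to.
func retryWhen(ctx context.Context, timeout time.Duration, fn func() error, retryable func(error) bool) error {
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()

	for {
		err := fn()
		if err == nil || !retryable(err) {
			return err
		}

		select {
		case <-ctx.Done():
			return fmt.Errorf("timed out retrying: %w", err)
		case <-time.After(2 * time.Second):
		}
	}
}

var errInvalidTarget = errors.New("InvalidTarget")

func main() {
	attempts := 0
	err := retryWhen(context.Background(), 30*time.Second, func() error {
		attempts++
		if attempts < 3 {
			return errInvalidTarget // e.g. target not yet registerable
		}
		return nil
	}, func(err error) bool { return errors.Is(err, errInvalidTarget) })

	fmt.Println("attempts:", attempts, "err:", err)
}
```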
--- .../service/elbv2/target_group_attachment.go | 46 ++++++------------- 1 file changed, 14 insertions(+), 32 deletions(-) diff --git a/internal/service/elbv2/target_group_attachment.go b/internal/service/elbv2/target_group_attachment.go index 13790803de9..d344454236f 100644 --- a/internal/service/elbv2/target_group_attachment.go +++ b/internal/service/elbv2/target_group_attachment.go @@ -5,7 +5,6 @@ package elbv2 import ( "context" - "fmt" "log" "time" @@ -14,7 +13,6 @@ import ( "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" @@ -58,48 +56,32 @@ func resourceAttachmentCreate(ctx context.Context, d *schema.ResourceData, meta var diags diag.Diagnostics conn := meta.(*conns.AWSClient).ELBV2Conn(ctx) - target := &elbv2.TargetDescription{ - Id: aws.String(d.Get("target_id").(string)), - } - - if v, ok := d.GetOk("port"); ok { - target.Port = aws.Int64(int64(v.(int))) + targetGroupARN := d.Get("target_group_arn").(string) + input := &elbv2.RegisterTargetsInput{ + TargetGroupArn: aws.String(targetGroupARN), + Targets: []*elbv2.TargetDescription{{ + Id: aws.String(d.Get("target_id").(string)), + }}, } if v, ok := d.GetOk("availability_zone"); ok { - target.AvailabilityZone = aws.String(v.(string)) + input.Targets[0].AvailabilityZone = aws.String(v.(string)) } - params := &elbv2.RegisterTargetsInput{ - TargetGroupArn: aws.String(d.Get("target_group_arn").(string)), - Targets: []*elbv2.TargetDescription{target}, + if v, ok := d.GetOk("port"); ok { + input.Targets[0].Port = aws.Int64(int64(v.(int))) } - log.Printf("[INFO] Registering Target %s with Target Group %s", d.Get("target_id").(string), - d.Get("target_group_arn").(string)) - - err := retry.RetryContext(ctx, 10*time.Minute, func() *retry.RetryError { - _, err := conn.RegisterTargetsWithContext(ctx, params) - - if tfawserr.ErrCodeEquals(err, "InvalidTarget") { - return retry.RetryableError(fmt.Errorf("attaching instance to LB, retrying: %s", err)) - } - - if err != nil { - return retry.NonRetryableError(err) - } + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, 10*time.Minute, func() (interface{}, error) { + return conn.RegisterTargetsWithContext(ctx, input) + }, elbv2.ErrCodeInvalidTargetException) - return nil - }) - if tfresource.TimedOut(err) { - _, err = conn.RegisterTargetsWithContext(ctx, params) - } if err != nil { - return sdkdiag.AppendErrorf(diags, "registering targets with target group: %s", err) + return sdkdiag.AppendErrorf(diags, "registering ELBv2 Target Group (%s) target: %s", targetGroupARN, err) } //lintignore:R016 // Allow legacy unstable ID usage in managed resource - d.SetId(id.PrefixedUniqueId(fmt.Sprintf("%s-", d.Get("target_group_arn")))) + d.SetId(id.PrefixedUniqueId(targetGroupARN + "-")) return diags } From b80ce96b41d5f3076e189d0bf276498bfd29b4cc Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 15 Dec 2023 16:32:58 -0500 Subject: [PATCH 116/123] r/aws_lb_target_group_attachment: Tidy up Delete. 
--- .../service/elbv2/target_group_attachment.go | 29 +++++++++++-------- 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/internal/service/elbv2/target_group_attachment.go b/internal/service/elbv2/target_group_attachment.go index d344454236f..429a6725277 100644 --- a/internal/service/elbv2/target_group_attachment.go +++ b/internal/service/elbv2/target_group_attachment.go @@ -159,26 +159,31 @@ func resourceAttachmentDelete(ctx context.Context, d *schema.ResourceData, meta var diags diag.Diagnostics conn := meta.(*conns.AWSClient).ELBV2Conn(ctx) - target := &elbv2.TargetDescription{ - Id: aws.String(d.Get("target_id").(string)), + targetGroupARN := d.Get("target_group_arn").(string) + input := &elbv2.DeregisterTargetsInput{ + TargetGroupArn: aws.String(targetGroupARN), + Targets: []*elbv2.TargetDescription{{ + Id: aws.String(d.Get("target_id").(string)), + }}, } - if v, ok := d.GetOk("port"); ok { - target.Port = aws.Int64(int64(v.(int))) + if v, ok := d.GetOk("availability_zone"); ok { + input.Targets[0].AvailabilityZone = aws.String(v.(string)) } - if v, ok := d.GetOk("availability_zone"); ok { - target.AvailabilityZone = aws.String(v.(string)) + if v, ok := d.GetOk("port"); ok { + input.Targets[0].Port = aws.Int64(int64(v.(int))) } - params := &elbv2.DeregisterTargetsInput{ - TargetGroupArn: aws.String(d.Get("target_group_arn").(string)), - Targets: []*elbv2.TargetDescription{target}, + log.Printf("[DEBUG] Deleting ELBv2 Target Group Attachment: %s", d.Id()) + _, err := conn.DeregisterTargetsWithContext(ctx, input) + + if tfawserr.ErrCodeEquals(err, elbv2.ErrCodeTargetGroupNotFoundException) { + return diags } - _, err := conn.DeregisterTargetsWithContext(ctx, params) - if err != nil && !tfawserr.ErrCodeEquals(err, elbv2.ErrCodeTargetGroupNotFoundException) { - return sdkdiag.AppendErrorf(diags, "deregistering Targets: %s", err) + if err != nil { + return sdkdiag.AppendErrorf(diags, "deregistering ELBv2 Target Group (%s) target: %s", targetGroupARN, err) } return diags From 4dcf39cabb2df61e0527a1b9db29fdfbfa867a1b Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 15 Dec 2023 16:54:00 -0500 Subject: [PATCH 117/123] r/aws_lb_target_group_attachment: Tidy up Read. 
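Read now delegates to a finder that returns a not-found error instead of inspecting the `DescribeTargetHealth` response inline, then asserts that exactly one description came back. A rough sketch of that single-result assertion, using hypothetical names in place of `tfresource.AssertSinglePtrResult`:

```go
package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

// singleResult mirrors the shape of the assertion used by the new finder:
// exactly one element is expected, anything else is an error.
func singleResult[T any](results []*T) (*T, error) {
	switch len(results) {
	case 0:
		return nil, errNotFound
	case 1:
		return results[0], nil
	default:
		return nil, fmt.Errorf("expected 1 result, got %d", len(results))
	}
}

type targetHealth struct{ State string }

func main() {
	one, err := singleResult([]*targetHealth{{State: "healthy"}})
	fmt.Println(one, err)

	_, err = singleResult([]*targetHealth{})
	fmt.Println(errors.Is(err, errNotFound))
}
```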
--- .../service/elbv2/target_group_attachment.go | 101 ++++++++++-------- 1 file changed, 57 insertions(+), 44 deletions(-) diff --git a/internal/service/elbv2/target_group_attachment.go b/internal/service/elbv2/target_group_attachment.go index 429a6725277..30be210e8df 100644 --- a/internal/service/elbv2/target_group_attachment.go +++ b/internal/service/elbv2/target_group_attachment.go @@ -13,6 +13,7 @@ import ( "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" @@ -92,66 +93,47 @@ func resourceAttachmentRead(ctx context.Context, d *schema.ResourceData, meta in var diags diag.Diagnostics conn := meta.(*conns.AWSClient).ELBV2Conn(ctx) - target := &elbv2.TargetDescription{ - Id: aws.String(d.Get("target_id").(string)), - } - - if v, ok := d.GetOk("port"); ok { - target.Port = aws.Int64(int64(v.(int))) + targetGroupARN := d.Get("target_group_arn").(string) + input := &elbv2.DescribeTargetHealthInput{ + TargetGroupArn: aws.String(targetGroupARN), + Targets: []*elbv2.TargetDescription{{ + Id: aws.String(d.Get("target_id").(string)), + }}, } if v, ok := d.GetOk("availability_zone"); ok { - target.AvailabilityZone = aws.String(v.(string)) + input.Targets[0].AvailabilityZone = aws.String(v.(string)) } - resp, err := conn.DescribeTargetHealthWithContext(ctx, &elbv2.DescribeTargetHealthInput{ - TargetGroupArn: aws.String(d.Get("target_group_arn").(string)), - Targets: []*elbv2.TargetDescription{target}, - }) - - if err != nil { - if tfawserr.ErrCodeEquals(err, elbv2.ErrCodeTargetGroupNotFoundException) { - log.Printf("[WARN] Target group does not exist, removing target attachment %s", d.Id()) - d.SetId("") - return diags - } - if tfawserr.ErrCodeEquals(err, elbv2.ErrCodeInvalidTargetException) { - log.Printf("[WARN] Target does not exist, removing target attachment %s", d.Id()) - d.SetId("") - return diags - } - return sdkdiag.AppendErrorf(diags, "reading Target Health: %s", err) + if v, ok := d.GetOk("port"); ok { + input.Targets[0].Port = aws.Int64(int64(v.(int))) } - for _, targetDesc := range resp.TargetHealthDescriptions { - if targetDesc == nil || targetDesc.Target == nil { - continue - } + output, err := FindTargetHealthDescription(ctx, conn, input) - if aws.StringValue(targetDesc.Target.Id) == d.Get("target_id").(string) { - // These will catch targets being removed by hand (draining as we plan) or that have been removed for a while - // without trying to re-create ones that are just not in use. For example, a target can be `unused` if the - // target group isnt assigned to anything, a scenario where we don't want to continuously recreate the resource. 
- if targetDesc.TargetHealth == nil { - continue - } + if !d.IsNewResource() && tfresource.NotFound(err) { + log.Printf("[WARN] ELBv2 Target Group Attachment %s not found, removing from state", d.Id()) + d.SetId("") + return diags + } - reason := aws.StringValue(targetDesc.TargetHealth.Reason) + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading ELBv2 Target Group Attachment (%s): %s", d.Id(), err) + } - if reason == elbv2.TargetHealthReasonEnumTargetNotRegistered || reason == elbv2.TargetHealthReasonEnumTargetDeregistrationInProgress { - log.Printf("[WARN] Target Attachment does not exist, recreating attachment") + // This will catch targets being removed by hand (draining as we plan) or that have been removed for a while + // without trying to re-create ones that are just not in use. For example, a target can be `unused` if the + // target group isnt assigned to anything, a scenario where we don't want to continuously recreate the resource. + if v := output.TargetHealth; v != nil { + if reason := aws.StringValue(v.Reason); reason == elbv2.TargetHealthReasonEnumTargetNotRegistered || reason == elbv2.TargetHealthReasonEnumTargetDeregistrationInProgress { + if !d.IsNewResource() { + log.Printf("[WARN] ELBv2 Target Group Attachment %s not found, removing from state", d.Id()) d.SetId("") return diags } } } - if len(resp.TargetHealthDescriptions) != 1 { - log.Printf("[WARN] Target does not exist, removing target attachment %s", d.Id()) - d.SetId("") - return diags - } - return diags } @@ -188,3 +170,34 @@ func resourceAttachmentDelete(ctx context.Context, d *schema.ResourceData, meta return diags } + +func FindTargetHealthDescription(ctx context.Context, conn *elbv2.ELBV2, input *elbv2.DescribeTargetHealthInput) (*elbv2.TargetHealthDescription, error) { + output, err := findTargetHealthDescriptions(ctx, conn, input) + + if err != nil { + return nil, err + } + + return tfresource.AssertSinglePtrResult(output) +} + +func findTargetHealthDescriptions(ctx context.Context, conn *elbv2.ELBV2, input *elbv2.DescribeTargetHealthInput) ([]*elbv2.TargetHealthDescription, error) { + output, err := conn.DescribeTargetHealthWithContext(ctx, input) + + if tfawserr.ErrCodeEquals(err, elbv2.ErrCodeInvalidTargetException, elbv2.ErrCodeTargetGroupNotFoundException) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output.TargetHealthDescriptions, nil +} From 767fd802893c4d960f87d03d8eeb5f3c49821a26 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 15 Dec 2023 17:21:01 -0500 Subject: [PATCH 118/123] r/aws_lb_target_group_attachment: Tidy up Read. 
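The deregistration/not-registered check moves out of Read and into a predicate that the finder applies while collecting results. A stand-alone sketch of that filtering step; the `predicate` type stands in for `tfslices.Predicate`, and the reason strings are placeholders rather than the ELBv2 health-reason enum values:

```go
package main

import "fmt"

// predicate mirrors the role of tfslices.Predicate in the finder:
// it decides whether a target health description should be kept.
type predicate[T any] func(T) bool

func filter[T any](items []T, keep predicate[T]) []T {
	var kept []T
	for _, item := range items {
		if keep(item) {
			kept = append(kept, item)
		}
	}
	return kept
}

func main() {
	reasons := []string{"registered", "deregistration-in-progress", "not-registered"}

	active := filter(reasons, func(reason string) bool {
		return reason != "deregistration-in-progress" && reason != "not-registered"
	})

	fmt.Println(active) // [registered]
}
```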
--- .../service/elbv2/target_group_attachment.go | 44 ++++++++++++------- 1 file changed, 27 insertions(+), 17 deletions(-) diff --git a/internal/service/elbv2/target_group_attachment.go b/internal/service/elbv2/target_group_attachment.go index 30be210e8df..d58adbf1a31 100644 --- a/internal/service/elbv2/target_group_attachment.go +++ b/internal/service/elbv2/target_group_attachment.go @@ -17,6 +17,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) @@ -109,7 +110,7 @@ func resourceAttachmentRead(ctx context.Context, d *schema.ResourceData, meta in input.Targets[0].Port = aws.Int64(int64(v.(int))) } - output, err := FindTargetHealthDescription(ctx, conn, input) + _, err := FindTargetHealthDescription(ctx, conn, input) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] ELBv2 Target Group Attachment %s not found, removing from state", d.Id()) @@ -121,19 +122,6 @@ func resourceAttachmentRead(ctx context.Context, d *schema.ResourceData, meta in return sdkdiag.AppendErrorf(diags, "reading ELBv2 Target Group Attachment (%s): %s", d.Id(), err) } - // This will catch targets being removed by hand (draining as we plan) or that have been removed for a while - // without trying to re-create ones that are just not in use. For example, a target can be `unused` if the - // target group isnt assigned to anything, a scenario where we don't want to continuously recreate the resource. - if v := output.TargetHealth; v != nil { - if reason := aws.StringValue(v.Reason); reason == elbv2.TargetHealthReasonEnumTargetNotRegistered || reason == elbv2.TargetHealthReasonEnumTargetDeregistrationInProgress { - if !d.IsNewResource() { - log.Printf("[WARN] ELBv2 Target Group Attachment %s not found, removing from state", d.Id()) - d.SetId("") - return diags - } - } - } - return diags } @@ -172,7 +160,21 @@ func resourceAttachmentDelete(ctx context.Context, d *schema.ResourceData, meta } func FindTargetHealthDescription(ctx context.Context, conn *elbv2.ELBV2, input *elbv2.DescribeTargetHealthInput) (*elbv2.TargetHealthDescription, error) { - output, err := findTargetHealthDescriptions(ctx, conn, input) + output, err := findTargetHealthDescriptions(ctx, conn, input, func(v *elbv2.TargetHealthDescription) bool { + // This will catch targets being removed by hand (draining as we plan) or that have been removed for a while + // without trying to re-create ones that are just not in use. For example, a target can be `unused` if the + // target group isnt assigned to anything, a scenario where we don't want to continuously recreate the resource. 
+ if v := v.TargetHealth; v != nil { + switch reason := aws.StringValue(v.Reason); reason { + case elbv2.TargetHealthReasonEnumTargetDeregistrationInProgress, elbv2.TargetHealthReasonEnumTargetNotRegistered: + return false + default: + return true + } + } + + return false + }) if err != nil { return nil, err @@ -181,7 +183,9 @@ func FindTargetHealthDescription(ctx context.Context, conn *elbv2.ELBV2, input * return tfresource.AssertSinglePtrResult(output) } -func findTargetHealthDescriptions(ctx context.Context, conn *elbv2.ELBV2, input *elbv2.DescribeTargetHealthInput) ([]*elbv2.TargetHealthDescription, error) { +func findTargetHealthDescriptions(ctx context.Context, conn *elbv2.ELBV2, input *elbv2.DescribeTargetHealthInput, filter tfslices.Predicate[*elbv2.TargetHealthDescription]) ([]*elbv2.TargetHealthDescription, error) { + var targetHealthDescriptions []*elbv2.TargetHealthDescription + output, err := conn.DescribeTargetHealthWithContext(ctx, input) if tfawserr.ErrCodeEquals(err, elbv2.ErrCodeInvalidTargetException, elbv2.ErrCodeTargetGroupNotFoundException) { @@ -199,5 +203,11 @@ func findTargetHealthDescriptions(ctx context.Context, conn *elbv2.ELBV2, input return nil, tfresource.NewEmptyResultError(input) } - return output.TargetHealthDescriptions, nil + for _, v := range output.TargetHealthDescriptions { + if v != nil && filter(v) { + targetHealthDescriptions = append(targetHealthDescriptions, v) + } + } + + return targetHealthDescriptions, nil } From f8592d66ad5e102bd778a8a8c467859da00bf13a Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 15 Dec 2023 17:30:14 -0500 Subject: [PATCH 119/123] Add 'flex.StringValueToInt64'. --- internal/flex/flex.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/internal/flex/flex.go b/internal/flex/flex.go index 171c2e120bc..f7e12d1d02b 100644 --- a/internal/flex/flex.go +++ b/internal/flex/flex.go @@ -317,6 +317,13 @@ func StringToIntValue(v *string) int { return i } +// StringValueToInt64 converts a string to a Go int64 pointer value. +// Invalid integer strings are converted to 0. +func StringValueToInt64(v string) *int64 { + i, _ := strconv.Atoi(v) + return aws.Int64(int64(i)) +} + // Takes a string of resource attributes separated by the ResourceIdSeparator constant // returns the number of parts func ResourceIdPartCount(id string) int { From 06c960ca8678827b5e1e779ae2d31dfe6c367886 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 15 Dec 2023 17:37:59 -0500 Subject: [PATCH 120/123] r/aws_lb_target_group_attachment: Tidy up acceptance tests. 
--- .../elbv2/target_group_attachment_test.go | 225 ++++++------------ 1 file changed, 73 insertions(+), 152 deletions(-) diff --git a/internal/service/elbv2/target_group_attachment_test.go b/internal/service/elbv2/target_group_attachment_test.go index 7412ac9f716..521a6849781 100644 --- a/internal/service/elbv2/target_group_attachment_test.go +++ b/internal/service/elbv2/target_group_attachment_test.go @@ -5,24 +5,25 @@ package elbv2_test import ( "context" - "errors" "fmt" - "strconv" "testing" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/elbv2" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/flex" + tfelbv2 "github.com/hashicorp/terraform-provider-aws/internal/service/elbv2" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) func TestAccELBV2TargetGroupAttachment_basic(t *testing.T) { ctx := acctest.Context(t) - targetGroupName := fmt.Sprintf("test-target-group-%s", sdkacctest.RandString(10)) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_lb_target_group_attachment.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -31,9 +32,9 @@ func TestAccELBV2TargetGroupAttachment_basic(t *testing.T) { CheckDestroy: testAccCheckTargetGroupAttachmentDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccTargetGroupAttachmentConfig_idInstance(targetGroupName), + Config: testAccTargetGroupAttachmentConfig_idInstance(rName), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckTargetGroupAttachmentExists(ctx, "aws_lb_target_group_attachment.test"), + testAccCheckTargetGroupAttachmentExists(ctx, resourceName), ), }, }, @@ -42,7 +43,9 @@ func TestAccELBV2TargetGroupAttachment_basic(t *testing.T) { func TestAccELBV2TargetGroupAttachment_disappears(t *testing.T) { ctx := acctest.Context(t) - targetGroupName := fmt.Sprintf("test-target-group-%s", sdkacctest.RandString(10)) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_lb_target_group_attachment.test" + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, elbv2.EndpointsID), @@ -50,10 +53,10 @@ func TestAccELBV2TargetGroupAttachment_disappears(t *testing.T) { CheckDestroy: testAccCheckTargetGroupAttachmentDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccTargetGroupAttachmentConfig_idInstance(targetGroupName), + Config: testAccTargetGroupAttachmentConfig_idInstance(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckTargetGroupAttachmentExists(ctx, "aws_lb_target_group_attachment.test"), - testAccCheckTargetGroupAttachmentDisappears(ctx, "aws_lb_target_group_attachment.test"), + testAccCheckTargetGroupAttachmentExists(ctx, resourceName), + acctest.CheckResourceDisappears(ctx, acctest.Provider, tfelbv2.ResourceTargetGroup(), resourceName), ), ExpectNonEmptyPlan: true, }, @@ -63,7 +66,8 @@ func TestAccELBV2TargetGroupAttachment_disappears(t *testing.T) { func TestAccELBV2TargetGroupAttachment_backwardsCompatibility(t *testing.T) { ctx := acctest.Context(t) - targetGroupName 
:= fmt.Sprintf("test-target-group-%s", sdkacctest.RandString(10)) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_alb_target_group_attachment.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -72,9 +76,9 @@ func TestAccELBV2TargetGroupAttachment_backwardsCompatibility(t *testing.T) { CheckDestroy: testAccCheckTargetGroupAttachmentDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccTargetGroupAttachmentConfig_backwardsCompatibility(targetGroupName), + Config: testAccTargetGroupAttachmentConfig_backwardsCompatibility(rName), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckTargetGroupAttachmentExists(ctx, "aws_alb_target_group_attachment.test"), + testAccCheckTargetGroupAttachmentExists(ctx, resourceName), ), }, }, @@ -83,7 +87,8 @@ func TestAccELBV2TargetGroupAttachment_backwardsCompatibility(t *testing.T) { func TestAccELBV2TargetGroupAttachment_port(t *testing.T) { ctx := acctest.Context(t) - targetGroupName := fmt.Sprintf("test-target-group-%s", sdkacctest.RandString(10)) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_lb_target_group_attachment.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -92,9 +97,9 @@ func TestAccELBV2TargetGroupAttachment_port(t *testing.T) { CheckDestroy: testAccCheckTargetGroupAttachmentDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccTargetGroupAttachmentConfig_port(targetGroupName), + Config: testAccTargetGroupAttachmentConfig_port(rName), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckTargetGroupAttachmentExists(ctx, "aws_lb_target_group_attachment.test"), + testAccCheckTargetGroupAttachmentExists(ctx, resourceName), ), }, }, @@ -103,7 +108,8 @@ func TestAccELBV2TargetGroupAttachment_port(t *testing.T) { func TestAccELBV2TargetGroupAttachment_ipAddress(t *testing.T) { ctx := acctest.Context(t) - targetGroupName := fmt.Sprintf("test-target-group-%s", sdkacctest.RandString(10)) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_lb_target_group_attachment.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -112,9 +118,9 @@ func TestAccELBV2TargetGroupAttachment_ipAddress(t *testing.T) { CheckDestroy: testAccCheckTargetGroupAttachmentDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccTargetGroupAttachmentConfig_idIPAddress(targetGroupName), + Config: testAccTargetGroupAttachmentConfig_idIPAddress(rName), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckTargetGroupAttachmentExists(ctx, "aws_lb_target_group_attachment.test"), + testAccCheckTargetGroupAttachmentExists(ctx, resourceName), ), }, }, @@ -123,7 +129,8 @@ func TestAccELBV2TargetGroupAttachment_ipAddress(t *testing.T) { func TestAccELBV2TargetGroupAttachment_lambda(t *testing.T) { ctx := acctest.Context(t) - targetGroupName := fmt.Sprintf("test-target-group-%s", sdkacctest.RandString(10)) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_lb_target_group_attachment.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -132,49 +139,15 @@ func TestAccELBV2TargetGroupAttachment_lambda(t *testing.T) { CheckDestroy: testAccCheckTargetGroupAttachmentDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccTargetGroupAttachmentConfig_idLambda(targetGroupName), + Config: 
testAccTargetGroupAttachmentConfig_idLambda(rName), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckTargetGroupAttachmentExists(ctx, "aws_lb_target_group_attachment.test"), + testAccCheckTargetGroupAttachmentExists(ctx, resourceName), ), }, }, }) } -func testAccCheckTargetGroupAttachmentDisappears(ctx context.Context, n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Attachment not found: %s", n) - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).ELBV2Conn(ctx) - targetGroupArn := rs.Primary.Attributes["target_group_arn"] - - target := &elbv2.TargetDescription{ - Id: aws.String(rs.Primary.Attributes["target_id"]), - } - - _, hasPort := rs.Primary.Attributes["port"] - if hasPort { - port, _ := strconv.Atoi(rs.Primary.Attributes["port"]) - target.Port = aws.Int64(int64(port)) - } - - params := &elbv2.DeregisterTargetsInput{ - TargetGroupArn: aws.String(targetGroupArn), - Targets: []*elbv2.TargetDescription{target}, - } - - _, err := conn.DeregisterTargetsWithContext(ctx, params) - if err != nil && !tfawserr.ErrCodeEquals(err, elbv2.ErrCodeTargetGroupNotFoundException) { - return fmt.Errorf("Error deregistering Targets: %s", err) - } - - return err - } -} - func testAccCheckTargetGroupAttachmentExists(ctx context.Context, n string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -182,37 +155,26 @@ func testAccCheckTargetGroupAttachmentExists(ctx context.Context, n string) reso return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return errors.New("No Target Group Attachment ID is set") - } - conn := acctest.Provider.Meta().(*conns.AWSClient).ELBV2Conn(ctx) - _, hasPort := rs.Primary.Attributes["port"] - targetGroupArn := rs.Primary.Attributes["target_group_arn"] - - target := &elbv2.TargetDescription{ - Id: aws.String(rs.Primary.Attributes["target_id"]), - } - if hasPort { - port, _ := strconv.Atoi(rs.Primary.Attributes["port"]) - target.Port = aws.Int64(int64(port)) + input := &elbv2.DescribeTargetHealthInput{ + TargetGroupArn: aws.String(rs.Primary.Attributes["target_group_arn"]), + Targets: []*elbv2.TargetDescription{{ + Id: aws.String(rs.Primary.Attributes["target_id"]), + }}, } - describe, err := conn.DescribeTargetHealthWithContext(ctx, &elbv2.DescribeTargetHealthInput{ - TargetGroupArn: aws.String(targetGroupArn), - Targets: []*elbv2.TargetDescription{target}, - }) - - if err != nil { - return err + if v := rs.Primary.Attributes["availability_zone"]; v != "" { + input.Targets[0].AvailabilityZone = aws.String(v) } - if len(describe.TargetHealthDescriptions) != 1 { - return errors.New("Target Group Attachment not found") + if v := rs.Primary.Attributes["port"]; v != "" { + input.Targets[0].Port = flex.StringValueToInt64(v) } - return nil + _, err := tfelbv2.FindTargetHealthDescription(ctx, conn, input) + + return err } } @@ -225,95 +187,54 @@ func testAccCheckTargetGroupAttachmentDestroy(ctx context.Context) resource.Test continue } - _, hasPort := rs.Primary.Attributes["port"] - targetGroupArn := rs.Primary.Attributes["target_group_arn"] + input := &elbv2.DescribeTargetHealthInput{ + TargetGroupArn: aws.String(rs.Primary.Attributes["target_group_arn"]), + Targets: []*elbv2.TargetDescription{{ + Id: aws.String(rs.Primary.Attributes["target_id"]), + }}, + } - target := &elbv2.TargetDescription{ - Id: aws.String(rs.Primary.Attributes["target_id"]), + if v := 
rs.Primary.Attributes["availability_zone"]; v != "" { + input.Targets[0].AvailabilityZone = aws.String(v) } - if hasPort { - port, _ := strconv.Atoi(rs.Primary.Attributes["port"]) - target.Port = aws.Int64(int64(port)) + + if v := rs.Primary.Attributes["port"]; v != "" { + input.Targets[0].Port = flex.StringValueToInt64(v) } - describe, err := conn.DescribeTargetHealthWithContext(ctx, &elbv2.DescribeTargetHealthInput{ - TargetGroupArn: aws.String(targetGroupArn), - Targets: []*elbv2.TargetDescription{target}, - }) - if err == nil { - if len(describe.TargetHealthDescriptions) != 0 { - return fmt.Errorf("Target Group Attachment %q still exists", rs.Primary.ID) - } + _, err := tfelbv2.FindTargetHealthDescription(ctx, conn, input) + + if tfresource.NotFound(err) { + continue } - // Verify the error - if tfawserr.ErrCodeEquals(err, elbv2.ErrCodeTargetGroupNotFoundException) || tfawserr.ErrCodeEquals(err, elbv2.ErrCodeInvalidTargetException) { - return nil - } else { - return fmt.Errorf("Unexpected error checking LB destroyed: %s", err) + if err != nil { + return err } + + return fmt.Errorf("ELBv2 Target Group Attachment %s still exists", rs.Primary.ID) } return nil } } -func testAccTargetGroupAttachmentInstanceBaseConfig() string { - return ` -data "aws_availability_zones" "available" { - # t2.micro instance type is not available in these Availability Zones - exclude_zone_ids = ["usw2-az4"] - state = "available" - - filter { - name = "opt-in-status" - values = ["opt-in-not-required"] - } -} - -data "aws_ami" "amzn-ami-minimal-hvm-ebs" { - most_recent = true - owners = ["amazon"] - - filter { - name = "name" - values = ["amzn-ami-minimal-hvm-*"] - } - - filter { - name = "root-device-type" - values = ["ebs"] - } -} - +func testAccTargetGroupAttachmentCongig_baseEC2Instance(rName string) string { + return acctest.ConfigCompose(acctest.ConfigLatestAmazonLinuxHVMEBSAMI(), acctest.ConfigVPCWithSubnets(rName, 1), fmt.Sprintf(` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id instance_type = "t2.micro" - subnet_id = aws_subnet.test.id -} - -resource "aws_subnet" "test" { - availability_zone = data.aws_availability_zones.available.names[0] - cidr_block = "10.0.1.0/24" - vpc_id = aws_vpc.test.id - - tags = { - Name = "tf-acc-test-lb-target-group-attachment" - } -} - -resource "aws_vpc" "test" { - cidr_block = "10.0.0.0/16" + subnet_id = aws_subnet.test[0].id tags = { - Name = "tf-acc-test-lb-target-group-attachment" + Name = %[1]q } } -` +`, rName)) } func testAccTargetGroupAttachmentConfig_idInstance(rName string) string { - return testAccTargetGroupAttachmentInstanceBaseConfig() + fmt.Sprintf(` + return acctest.ConfigCompose(testAccTargetGroupAttachmentCongig_baseEC2Instance(rName), fmt.Sprintf(` resource "aws_lb_target_group" "test" { name = %[1]q port = 443 @@ -325,11 +246,11 @@ resource "aws_lb_target_group_attachment" "test" { target_group_arn = aws_lb_target_group.test.arn target_id = aws_instance.test.id } -`, rName) +`, rName)) } func testAccTargetGroupAttachmentConfig_port(rName string) string { - return testAccTargetGroupAttachmentInstanceBaseConfig() + fmt.Sprintf(` + return acctest.ConfigCompose(testAccTargetGroupAttachmentCongig_baseEC2Instance(rName), fmt.Sprintf(` resource "aws_lb_target_group" "test" { name = %[1]q port = 443 @@ -342,11 +263,11 @@ resource "aws_lb_target_group_attachment" "test" { target_id = aws_instance.test.id port = 80 } -`, rName) +`, rName)) } func testAccTargetGroupAttachmentConfig_backwardsCompatibility(rName string) string { - 
return testAccTargetGroupAttachmentInstanceBaseConfig() + fmt.Sprintf(` + return acctest.ConfigCompose(testAccTargetGroupAttachmentCongig_baseEC2Instance(rName), fmt.Sprintf(` resource "aws_lb_target_group" "test" { name = %[1]q port = 443 @@ -359,11 +280,11 @@ resource "aws_alb_target_group_attachment" "test" { target_id = aws_instance.test.id port = 80 } -`, rName) +`, rName)) } func testAccTargetGroupAttachmentConfig_idIPAddress(rName string) string { - return testAccTargetGroupAttachmentInstanceBaseConfig() + fmt.Sprintf(` + return acctest.ConfigCompose(testAccTargetGroupAttachmentCongig_baseEC2Instance(rName), fmt.Sprintf(` resource "aws_lb_target_group" "test" { name = %[1]q port = 443 @@ -377,7 +298,7 @@ resource "aws_lb_target_group_attachment" "test" { target_group_arn = aws_lb_target_group.test.arn target_id = aws_instance.test.private_ip } -`, rName) +`, rName)) } func testAccTargetGroupAttachmentConfig_idLambda(rName string) string { From f05bbfd6eaa19fd662347211d617abb2897d0aed Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 15 Dec 2023 18:13:46 -0500 Subject: [PATCH 121/123] Fix 'TestAccELBV2TargetGroupAttachment_disappears'. --- internal/service/elbv2/target_group_attachment_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/service/elbv2/target_group_attachment_test.go b/internal/service/elbv2/target_group_attachment_test.go index 521a6849781..85ad386a05b 100644 --- a/internal/service/elbv2/target_group_attachment_test.go +++ b/internal/service/elbv2/target_group_attachment_test.go @@ -56,7 +56,7 @@ func TestAccELBV2TargetGroupAttachment_disappears(t *testing.T) { Config: testAccTargetGroupAttachmentConfig_idInstance(rName), Check: resource.ComposeTestCheckFunc( testAccCheckTargetGroupAttachmentExists(ctx, resourceName), - acctest.CheckResourceDisappears(ctx, acctest.Provider, tfelbv2.ResourceTargetGroup(), resourceName), + acctest.CheckResourceDisappears(ctx, acctest.Provider, tfelbv2.ResourceTargetGroupAttachment(), resourceName), ), ExpectNonEmptyPlan: true, }, From 270c0a414df0236f0d99aa20034a93eda7533378 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 18 Dec 2023 08:18:01 -0500 Subject: [PATCH 122/123] Use 'v1.52.0' of the semgrep image. 
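Pinning each scan job to a fixed tag keeps the CI runs reproducible instead of
tracking whatever the unpinned returntocorp/semgrep image happens to resolve to
at run time. The repeated fragment each job carries after this change (taken
from the workflow diff below; only the image reference changes, the job steps
are untouched):

    container:
      image: "returntocorp/semgrep:1.52.0"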
--- .github/workflows/semgrep-ci.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/semgrep-ci.yml b/.github/workflows/semgrep-ci.yml index c466ddb5b2d..8929554fe4e 100644 --- a/.github/workflows/semgrep-ci.yml +++ b/.github/workflows/semgrep-ci.yml @@ -21,7 +21,7 @@ jobs: name: Code Quality Scan runs-on: ubuntu-latest container: - image: returntocorp/semgrep + image: "returntocorp/semgrep:1.52.0" steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - run: | @@ -43,7 +43,7 @@ jobs: name: Naming Scan Caps/AWS/EC2 runs-on: ubuntu-latest container: - image: returntocorp/semgrep + image: "returntocorp/semgrep:1.52.0" if: (github.action != 'dependabot[bot]') steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 @@ -54,7 +54,7 @@ jobs: name: Test Configs Scan runs-on: ubuntu-latest container: - image: returntocorp/semgrep + image: "returntocorp/semgrep:1.52.0" if: (github.action != 'dependabot[bot]') steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 @@ -65,7 +65,7 @@ jobs: name: Service Name Scan A-C runs-on: ubuntu-latest container: - image: returntocorp/semgrep + image: "returntocorp/semgrep:1.52.0" if: (github.action != 'dependabot[bot]') steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 @@ -76,7 +76,7 @@ jobs: name: Service Name Scan C-I runs-on: ubuntu-latest container: - image: returntocorp/semgrep + image: "returntocorp/semgrep:1.52.0" if: (github.action != 'dependabot[bot]') steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 @@ -87,7 +87,7 @@ jobs: name: Service Name Scan I-Q runs-on: ubuntu-latest container: - image: returntocorp/semgrep + image: "returntocorp/semgrep:1.52.0" if: (github.action != 'dependabot[bot]') steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 @@ -98,7 +98,7 @@ jobs: name: Service Name Scan Q-Z runs-on: ubuntu-latest container: - image: returntocorp/semgrep + image: "returntocorp/semgrep:1.52.0" if: (github.action != 'dependabot[bot]') steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 From fe2d19228385fa39c3c152e637e3c93e0a6119f1 Mon Sep 17 00:00:00 2001 From: changelogbot Date: Mon, 18 Dec 2023 13:24:54 +0000 Subject: [PATCH 123/123] Update CHANGELOG.md for #34964 --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4c9d48d4ff7..53e46b675d2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,12 @@ ENHANCEMENTS: * data-source/aws_ecr_image: Add `image_uri` attribute ([#24526](https://github.com/hashicorp/terraform-provider-aws/issues/24526)) +BUG FIXES: + +* data-source/aws_lb_target_group: Change `deregistration_delay` from `TypeInt` to `TypeString` ([#31436](https://github.com/hashicorp/terraform-provider-aws/issues/31436)) +* resource/aws_dynamodb_table: Fix error when waiting for snapshot to be created ([#34848](https://github.com/hashicorp/terraform-provider-aws/issues/34848)) +* resource/aws_lb_target_group: Fix diff on `stickiness.cookie_name` when `stickiness.type` is `lb_cookie` ([#31436](https://github.com/hashicorp/terraform-provider-aws/issues/31436)) + ## 5.31.0 (December 15, 2023) FEATURES: