diff --git a/.changelog/39256.txt b/.changelog/39256.txt
new file mode 100644
index 00000000000..9aaabfda5da
--- /dev/null
+++ b/.changelog/39256.txt
@@ -0,0 +1,3 @@
+```release-note:note
+provider: Downgrades to Go `1.22.6`. A small number of users have reported failed or hanging network connections using the version of the Terraform AWS provider which was first built with Go `1.23.0` (`v5.65.0`). At this point, maintainers have been unable to reproduce failures, but enough distinct users have reported issues that we are going to attempt downgrading to Go `1.22.6` for the next provider release. We will continue to coordinate with users and AWS in an attempt to identify the root cause, using this upcoming release with a reverted Go build version as a data point.
+```
diff --git a/.go-version b/.go-version
index a6c2798a482..013173af5e9 100644
--- a/.go-version
+++ b/.go-version
@@ -1 +1 @@
-1.23.0
+1.22.6
diff --git a/go.mod b/go.mod
index 3fe9a05c0d8..394a53016d5 100644
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,6 @@
 module github.com/hashicorp/terraform-provider-aws
 
-go 1.23.0
+go 1.22.6
 
 require (
 	github.com/ProtonMail/go-crypto v1.1.0-alpha.5-proton
diff --git a/internal/service/autoscaling/group.go b/internal/service/autoscaling/group.go
index 6765fcb0f58..9b79e8687b0 100644
--- a/internal/service/autoscaling/group.go
+++ b/internal/service/autoscaling/group.go
@@ -8,7 +8,6 @@ import ( // nosemgrep:ci.semgrep.aws.multiple-service-imports
 	"errors"
 	"fmt"
 	"log"
-	"slices"
 	"strconv"
 	"strings"
 	"time"
@@ -35,6 +34,7 @@ import ( // nosemgrep:ci.semgrep.aws.multiple-service-imports
 	"github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag"
 	"github.com/hashicorp/terraform-provider-aws/internal/flex"
 	"github.com/hashicorp/terraform-provider-aws/internal/sdkv2/types/nullable"
+	tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices"
 	"github.com/hashicorp/terraform-provider-aws/internal/tfresource"
 	"github.com/hashicorp/terraform-provider-aws/internal/verify"
 	"github.com/hashicorp/terraform-provider-aws/names"
@@ -1504,7 +1504,7 @@ func resourceGroupUpdate(ctx context.Context, d *schema.ResourceData, meta inter
 
 		// API only supports adding or removing 10 at a time.
 		batchSize := 10
-		for chunk := range slices.Chunk(expandTrafficSourceIdentifiers(os.Difference(ns).List()), batchSize) {
+		for _, chunk := range tfslices.Chunks(expandTrafficSourceIdentifiers(os.Difference(ns).List()), batchSize) {
 			input := &autoscaling.DetachTrafficSourcesInput{
 				AutoScalingGroupName: aws.String(d.Id()),
 				TrafficSources:       chunk,
@@ -1521,7 +1521,7 @@ func resourceGroupUpdate(ctx context.Context, d *schema.ResourceData, meta inter
 			}
 		}
 
-		for chunk := range slices.Chunk(expandTrafficSourceIdentifiers(ns.Difference(os).List()), batchSize) {
+		for _, chunk := range tfslices.Chunks(expandTrafficSourceIdentifiers(ns.Difference(os).List()), batchSize) {
 			input := &autoscaling.AttachTrafficSourcesInput{
 				AutoScalingGroupName: aws.String(d.Id()),
 				TrafficSources:       chunk,
@@ -1545,7 +1545,7 @@ func resourceGroupUpdate(ctx context.Context, d *schema.ResourceData, meta inter
 
 		// API only supports adding or removing 10 at a time.
 		batchSize := 10
-		for chunk := range slices.Chunk(flex.ExpandStringValueSet(os.Difference(ns)), batchSize) {
+		for _, chunk := range tfslices.Chunks(flex.ExpandStringValueSet(os.Difference(ns)), batchSize) {
 			input := &autoscaling.DetachLoadBalancersInput{
 				AutoScalingGroupName: aws.String(d.Id()),
 				LoadBalancerNames:    chunk,
@@ -1562,7 +1562,7 @@ func resourceGroupUpdate(ctx context.Context, d *schema.ResourceData, meta inter
 			}
 		}
 
-		for chunk := range slices.Chunk(flex.ExpandStringValueSet(ns.Difference(os)), batchSize) {
+		for _, chunk := range tfslices.Chunks(flex.ExpandStringValueSet(ns.Difference(os)), batchSize) {
 			input := &autoscaling.AttachLoadBalancersInput{
 				AutoScalingGroupName: aws.String(d.Id()),
 				LoadBalancerNames:    chunk,
@@ -1586,7 +1586,7 @@ func resourceGroupUpdate(ctx context.Context, d *schema.ResourceData, meta inter
 
 		// API only supports adding or removing 10 at a time.
 		batchSize := 10
-		for chunk := range slices.Chunk(flex.ExpandStringValueSet(os.Difference(ns)), batchSize) {
+		for _, chunk := range tfslices.Chunks(flex.ExpandStringValueSet(os.Difference(ns)), batchSize) {
 			input := &autoscaling.DetachLoadBalancerTargetGroupsInput{
 				AutoScalingGroupName: aws.String(d.Id()),
 				TargetGroupARNs:      chunk,
@@ -1603,7 +1603,7 @@ func resourceGroupUpdate(ctx context.Context, d *schema.ResourceData, meta inter
 			}
 		}
 
-		for chunk := range slices.Chunk(flex.ExpandStringValueSet(ns.Difference(os)), batchSize) {
+		for _, chunk := range tfslices.Chunks(flex.ExpandStringValueSet(ns.Difference(os)), batchSize) {
 			input := &autoscaling.AttachLoadBalancerTargetGroupsInput{
 				AutoScalingGroupName: aws.String(d.Id()),
 				TargetGroupARNs:      chunk,
@@ -1864,7 +1864,7 @@ func drainGroup(ctx context.Context, conn *autoscaling.Client, name string, inst
 		}
 	}
 
 	const batchSize = 50 // API limit.
-	for chunk := range slices.Chunk(instanceIDs, batchSize) {
+	for _, chunk := range tfslices.Chunks(instanceIDs, batchSize) {
 		input := &autoscaling.SetInstanceProtectionInput{
 			AutoScalingGroupName: aws.String(name),
 			InstanceIds:          chunk,
diff --git a/internal/service/connect/routing_profile.go b/internal/service/connect/routing_profile.go
index 791bd07429e..1ed8dd8413d 100644
--- a/internal/service/connect/routing_profile.go
+++ b/internal/service/connect/routing_profile.go
@@ -7,7 +7,6 @@ import (
 	"context"
 	"fmt"
 	"log"
-	"slices"
 	"strings"
 
 	"github.com/aws/aws-sdk-go-v2/aws"
@@ -21,6 +20,7 @@
 	"github.com/hashicorp/terraform-provider-aws/internal/enum"
 	"github.com/hashicorp/terraform-provider-aws/internal/errs"
 	"github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag"
+	tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices"
 	tftags "github.com/hashicorp/terraform-provider-aws/internal/tags"
 	"github.com/hashicorp/terraform-provider-aws/internal/tfresource"
 	"github.com/hashicorp/terraform-provider-aws/internal/verify"
@@ -361,7 +361,8 @@ func updateRoutingProfileQueueAssociations(ctx context.Context, conn *connect.Cl
 	// the respective queues based on the diff detected

 	// disassociate first since Queue and channel type combination cannot be duplicated
-	for chunk := range slices.Chunk(del, routingProfileQueueAssociationChunkSize) {
+	chunks := tfslices.Chunks(del, routingProfileQueueAssociationChunkSize)
+	for _, chunk := range chunks {
 		var queueReferences []awstypes.RoutingProfileQueueReference
 		for _, v := range chunk {
 			if v := v.QueueReference; v != nil {
@@ -384,7 +385,8 @@ func updateRoutingProfileQueueAssociations(ctx context.Context, conn *connect.Cl
 		}
 	}

-	for chunk := range slices.Chunk(add, routingProfileQueueAssociationChunkSize) {
+	chunks = tfslices.Chunks(add, routingProfileQueueAssociationChunkSize)
+	for _, chunk := range chunks {
 		input := &connect.AssociateRoutingProfileQueuesInput{
 			InstanceId:   aws.String(instanceID),
 			QueueConfigs: chunk,
diff --git a/internal/service/docdb/cluster_parameter_group.go b/internal/service/docdb/cluster_parameter_group.go
index 910dec1a1bb..d530b2a85c2 100644
--- a/internal/service/docdb/cluster_parameter_group.go
+++ b/internal/service/docdb/cluster_parameter_group.go
@@ -8,7 +8,6 @@ import (
 	"fmt"
 	"log"
 	"reflect"
-	"slices"
 	"time"
 
 	"github.com/aws/aws-sdk-go-v2/aws"
@@ -22,6 +21,7 @@
 	"github.com/hashicorp/terraform-provider-aws/internal/enum"
 	"github.com/hashicorp/terraform-provider-aws/internal/errs"
 	"github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag"
+	tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices"
 	tftags "github.com/hashicorp/terraform-provider-aws/internal/tags"
 	"github.com/hashicorp/terraform-provider-aws/internal/tfresource"
 	"github.com/hashicorp/terraform-provider-aws/internal/verify"
@@ -226,7 +226,7 @@ func modifyClusterParameterGroupParameters(ctx context.Context, conn *docdb.Clie
 		clusterParameterGroupMaxParamsBulkEdit = 20
 	)
 	// We can only modify 20 parameters at a time, so chunk them until we've got them all.
-	for chunk := range slices.Chunk(parameters, clusterParameterGroupMaxParamsBulkEdit) {
+	for _, chunk := range tfslices.Chunks(parameters, clusterParameterGroupMaxParamsBulkEdit) {
 		input := &docdb.ModifyDBClusterParameterGroupInput{
 			DBClusterParameterGroupName: aws.String(name),
 			Parameters:                  chunk,
diff --git a/internal/service/kafka/scram_secret_association.go b/internal/service/kafka/scram_secret_association.go
index 51d96cad139..183e5017647 100644
--- a/internal/service/kafka/scram_secret_association.go
+++ b/internal/service/kafka/scram_secret_association.go
@@ -8,7 +8,6 @@ import (
 	"errors"
 	"fmt"
 	"log"
-	"slices"
 
 	"github.com/aws/aws-sdk-go-v2/aws"
 	"github.com/aws/aws-sdk-go-v2/service/kafka"
@@ -20,6 +19,7 @@
 	"github.com/hashicorp/terraform-provider-aws/internal/errs"
 	"github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag"
 	"github.com/hashicorp/terraform-provider-aws/internal/flex"
+	tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices"
 	"github.com/hashicorp/terraform-provider-aws/internal/tfresource"
 	"github.com/hashicorp/terraform-provider-aws/internal/verify"
 )
@@ -163,7 +163,7 @@ func findSCRAMSecretsByClusterARN(ctx context.Context, conn *kafka.Client, clust
 }
 
 func associateSRAMSecrets(ctx context.Context, conn *kafka.Client, clusterARN string, secretARNs []string) error {
-	for chunk := range slices.Chunk(secretARNs, scramSecretBatchSize) {
+	for _, chunk := range tfslices.Chunks(secretARNs, scramSecretBatchSize) {
 		input := &kafka.BatchAssociateScramSecretInput{
 			ClusterArn:    aws.String(clusterARN),
 			SecretArnList: chunk,
@@ -184,7 +184,7 @@ func associateSRAMSecrets(ctx context.Context, conn *kafka.Client, clusterARN st
 }
 
 func disassociateSRAMSecrets(ctx context.Context, conn *kafka.Client, clusterARN string, secretARNs []string) error {
-	for chunk := range slices.Chunk(secretARNs, scramSecretBatchSize) {
+	for _, chunk := range tfslices.Chunks(secretARNs, scramSecretBatchSize) {
 		input := &kafka.BatchDisassociateScramSecretInput{
 			ClusterArn:    aws.String(clusterARN),
 			SecretArnList: chunk,
diff --git a/internal/service/lakeformation/lf_tag.go b/internal/service/lakeformation/lf_tag.go
index 507d271e1bd..c083d1e9cac 100644
--- a/internal/service/lakeformation/lf_tag.go
+++ b/internal/service/lakeformation/lf_tag.go
@@ -7,7 +7,6 @@ import (
 	"context"
 	"fmt"
 	"log"
-	"slices"
 	"strings"
 
 	"github.com/YakDriver/regexache"
@@ -21,6 +20,7 @@
 	"github.com/hashicorp/terraform-provider-aws/internal/errs"
 	"github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag"
 	"github.com/hashicorp/terraform-provider-aws/internal/flex"
+	tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices"
 	"github.com/hashicorp/terraform-provider-aws/names"
 )
@@ -83,7 +83,7 @@ func resourceLFTagCreate(ctx context.Context, d *schema.ResourceData, meta inter
 	id := lfTagCreateResourceID(catalogID, tagKey)
 
 	i := 0
-	for chunk := range slices.Chunk(tagValues.List(), lfTagsValuesMaxBatchSize) {
+	for _, chunk := range tfslices.Chunks(tagValues.List(), lfTagsValuesMaxBatchSize) {
 		if i == 0 {
 			input := &lakeformation.CreateLFTagInput{
 				CatalogId: aws.String(catalogID),
@@ -169,11 +169,11 @@ func resourceLFTagUpdate(ctx context.Context, d *schema.ResourceData, meta inter
 
 	var toAddChunks, toDeleteChunks [][]interface{}
 	if len(toAdd.List()) > 0 {
-		toAddChunks = slices.Collect(slices.Chunk(toAdd.List(), lfTagsValuesMaxBatchSize))
+		toAddChunks = tfslices.Chunks(toAdd.List(), lfTagsValuesMaxBatchSize)
 	}
 
 	if len(toDelete.List()) > 0 {
-		toDeleteChunks = slices.Collect(slices.Chunk(toDelete.List(), lfTagsValuesMaxBatchSize))
+		toDeleteChunks = tfslices.Chunks(toDelete.List(), lfTagsValuesMaxBatchSize)
 	}
 
 	for {
diff --git a/internal/service/neptune/cluster_parameter_group.go b/internal/service/neptune/cluster_parameter_group.go
index b31cd660ef4..aa63ef7b8a8 100644
--- a/internal/service/neptune/cluster_parameter_group.go
+++ b/internal/service/neptune/cluster_parameter_group.go
@@ -242,7 +242,7 @@ func modifyClusterParameterGroupParameters(ctx context.Context, conn *neptune.Cl
 		clusterParameterGroupMaxParamsBulkEdit = 20
 	)
 	// We can only modify 20 parameters at a time, so chunk them until we've got them all.
-	for chunk := range slices.Chunk(parameters, clusterParameterGroupMaxParamsBulkEdit) {
+	for _, chunk := range tfslices.Chunks(parameters, clusterParameterGroupMaxParamsBulkEdit) {
 		input := &neptune.ModifyDBClusterParameterGroupInput{
 			DBClusterParameterGroupName: aws.String(name),
 			Parameters:                  chunk,
diff --git a/internal/service/neptune/parameter_group.go b/internal/service/neptune/parameter_group.go
index df81d7c11bb..0c8e05589a2 100644
--- a/internal/service/neptune/parameter_group.go
+++ b/internal/service/neptune/parameter_group.go
@@ -7,7 +7,6 @@ import (
 	"context"
 	"fmt"
 	"log"
-	"slices"
 	"time"
 
 	"github.com/aws/aws-sdk-go-v2/aws"
@@ -21,6 +20,7 @@
 	"github.com/hashicorp/terraform-provider-aws/internal/enum"
 	"github.com/hashicorp/terraform-provider-aws/internal/errs"
 	"github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag"
+	tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices"
 	tftags "github.com/hashicorp/terraform-provider-aws/internal/tags"
 	"github.com/hashicorp/terraform-provider-aws/internal/tfresource"
 	"github.com/hashicorp/terraform-provider-aws/internal/verify"
@@ -226,7 +226,7 @@ func resourceParameterGroupDelete(ctx context.Context, d *schema.ResourceData, m
 }
 
 func addDBParameterGroupParameters(ctx context.Context, conn *neptune.Client, name string, parameters []awstypes.Parameter) error {
 	// We can only modify 20 parameters at a time, so chunk them until we've got them all.
-	for chunk := range slices.Chunk(parameters, dbParameterGroupMaxParamsBulkEdit) {
+	for _, chunk := range tfslices.Chunks(parameters, dbParameterGroupMaxParamsBulkEdit) {
 		input := &neptune.ModifyDBParameterGroupInput{
 			DBParameterGroupName: aws.String(name),
 			Parameters:           chunk,
@@ -243,7 +243,7 @@ func addDBParameterGroupParameters(ctx context.Context, conn *neptune.Client, na
 }
 
 func delDBParameterGroupParameters(ctx context.Context, conn *neptune.Client, name string, parameters []awstypes.Parameter) error {
 	// We can only modify 20 parameters at a time, so chunk them until we've got them all.
-	for chunk := range slices.Chunk(parameters, dbParameterGroupMaxParamsBulkEdit) {
+	for _, chunk := range tfslices.Chunks(parameters, dbParameterGroupMaxParamsBulkEdit) {
 		input := &neptune.ResetDBParameterGroupInput{
 			DBParameterGroupName: aws.String(name),
 			Parameters:           chunk,
diff --git a/internal/service/rds/cluster_parameter_group.go b/internal/service/rds/cluster_parameter_group.go
index 28a4a0c6ae0..ccfdc6bfcdc 100644
--- a/internal/service/rds/cluster_parameter_group.go
+++ b/internal/service/rds/cluster_parameter_group.go
@@ -208,7 +208,7 @@ func resourceClusterParameterGroupUpdate(ctx context.Context, d *schema.Resource
 		o, n := d.GetChange(names.AttrParameter)
 		os, ns := o.(*schema.Set), n.(*schema.Set)
 
-		for chunk := range slices.Chunk(expandParameters(ns.Difference(os).List()), maxParamModifyChunk) {
+		for _, chunk := range tfslices.Chunks(expandParameters(ns.Difference(os).List()), maxParamModifyChunk) {
 			input := &rds.ModifyDBClusterParameterGroupInput{
 				DBClusterParameterGroupName: aws.String(d.Id()),
 				Parameters:                  chunk,
@@ -236,7 +236,7 @@ func resourceClusterParameterGroupUpdate(ctx context.Context, d *schema.Resource
 		}
 
 		// Reset parameters that have been removed.
-		for chunk := range slices.Chunk(maps.Values(toRemove), maxParamModifyChunk) {
+		for _, chunk := range tfslices.Chunks(maps.Values(toRemove), maxParamModifyChunk) {
 			input := &rds.ResetDBClusterParameterGroupInput{
 				DBClusterParameterGroupName: aws.String(d.Id()),
 				Parameters:                  chunk,
diff --git a/internal/service/route53/zone.go b/internal/service/route53/zone.go
index 940e3bf78db..15edc9f2afb 100644
--- a/internal/service/route53/zone.go
+++ b/internal/service/route53/zone.go
@@ -383,7 +383,8 @@ func deleteAllResourceRecordsFromHostedZone(ctx context.Context, conn *route53.C
 	const (
 		chunkSize = 100
 	)
-	for chunk := range slices.Chunk(resourceRecordSets, chunkSize) {
+	chunks := tfslices.Chunks(resourceRecordSets, chunkSize)
+	for _, chunk := range chunks {
 		changes := tfslices.ApplyToAll(chunk, func(v awstypes.ResourceRecordSet) awstypes.Change {
 			return awstypes.Change{
 				Action: awstypes.ChangeActionDelete,
diff --git a/internal/service/ssm/document.go b/internal/service/ssm/document.go
index ef17345d0e3..e6ed152e142 100644
--- a/internal/service/ssm/document.go
+++ b/internal/service/ssm/document.go
@@ -9,7 +9,6 @@ import (
 	"fmt"
 	"log"
 	"regexp"
-	"slices"
 	"strings"
 	"time"
 
@@ -28,6 +27,7 @@
 	"github.com/hashicorp/terraform-provider-aws/internal/errs"
 	"github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag"
 	"github.com/hashicorp/terraform-provider-aws/internal/flex"
+	tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices"
 	tftags "github.com/hashicorp/terraform-provider-aws/internal/tags"
 	"github.com/hashicorp/terraform-provider-aws/internal/tfresource"
 	itypes "github.com/hashicorp/terraform-provider-aws/internal/types"
@@ -290,7 +290,9 @@ func resourceDocumentCreate(ctx context.Context, d *schema.ResourceData, meta in
 		tfMap := flex.ExpandStringValueMap(v.(map[string]interface{}))
 
 		if v, ok := tfMap["account_ids"]; ok && v != "" {
-			for chunk := range slices.Chunk(strings.Split(v, ","), documentPermissionsBatchLimit) {
+			chunks := tfslices.Chunks(strings.Split(v, ","), documentPermissionsBatchLimit)
+
+			for _, chunk := range chunks {
 				input := &ssm.ModifyDocumentPermissionInput{
 					AccountIdsToAdd: chunk,
 					Name:            aws.String(d.Id()),
@@ -424,7 +426,7 @@ func resourceDocumentUpdate(ctx context.Context, d *schema.ResourceData, meta in
 			}
 		}
 
-		for chunk := range slices.Chunk(newAccountIDs.Difference(oldAccountIDs), documentPermissionsBatchLimit) {
+		for _, chunk := range tfslices.Chunks(newAccountIDs.Difference(oldAccountIDs), documentPermissionsBatchLimit) {
 			input := &ssm.ModifyDocumentPermissionInput{
 				AccountIdsToAdd: chunk,
 				Name:            aws.String(d.Id()),
@@ -438,7 +440,7 @@ func resourceDocumentUpdate(ctx context.Context, d *schema.ResourceData, meta in
 			}
 		}
 
-		for chunk := range slices.Chunk(oldAccountIDs.Difference(newAccountIDs), documentPermissionsBatchLimit) {
+		for _, chunk := range tfslices.Chunks(oldAccountIDs.Difference(newAccountIDs), documentPermissionsBatchLimit) {
 			input := &ssm.ModifyDocumentPermissionInput{
 				AccountIdsToRemove: chunk,
 				Name:               aws.String(d.Id()),
@@ -515,7 +517,9 @@ func resourceDocumentDelete(ctx context.Context, d *schema.ResourceData, meta in
 		tfMap := flex.ExpandStringValueMap(v.(map[string]interface{}))
 
 		if v, ok := tfMap["account_ids"]; ok && v != "" {
-			for chunk := range slices.Chunk(strings.Split(v, ","), documentPermissionsBatchLimit) {
+			chunks := tfslices.Chunks(strings.Split(v, ","), documentPermissionsBatchLimit)
+
+			for _, chunk := range chunks {
 				input := &ssm.ModifyDocumentPermissionInput{
 					AccountIdsToRemove: chunk,
 					Name:               aws.String(d.Id()),
diff --git a/internal/slices/slices.go b/internal/slices/slices.go
index f35b30007b3..dccfa9f3814 100644
--- a/internal/slices/slices.go
+++ b/internal/slices/slices.go
@@ -100,6 +100,23 @@ func Any[S ~[]E, E any](s S, f Predicate[E]) bool {
 	return false
 }
 
+// Chunks returns a slice of S, each of the specified size (or less).
+func Chunks[S ~[]E, E any](s S, size int) []S {
+	chunks := make([]S, 0)
+
+	for i := 0; i < len(s); i += size {
+		end := i + size
+
+		if end > len(s) {
+			end = len(s)
+		}
+
+		chunks = append(chunks, s[i:end])
+	}
+
+	return chunks
+}
+
 // AppendUnique appends unique (not already in the slice) values to a slice.
 func AppendUnique[S ~[]E, E comparable](s S, vs ...E) S {
 	for _, v := range vs {
diff --git a/internal/slices/slices_test.go b/internal/slices/slices_test.go
index 0537d29f2cf..35ea97b2621 100644
--- a/internal/slices/slices_test.go
+++ b/internal/slices/slices_test.go
@@ -153,6 +153,45 @@ func TestApplyToAll(t *testing.T) {
 	}
 }
 
+func TestChunk(t *testing.T) {
+	t.Parallel()
+
+	type testCase struct {
+		input    []string
+		expected [][]string
+	}
+	tests := map[string]testCase{
+		"three elements": {
+			input:    []string{"one", "two", "3"},
+			expected: [][]string{{"one", "two"}, {"3"}},
+		},
+		"two elements": {
+			input:    []string{"aa", "bb"},
+			expected: [][]string{{"aa", "bb"}},
+		},
+		"one element": {
+			input:    []string{"1"},
+			expected: [][]string{{"1"}},
+		},
+		"zero elements": {
+			input:    []string{},
+			expected: [][]string{},
+		},
+	}
+
+	for name, test := range tests {
+		t.Run(name, func(t *testing.T) {
+			t.Parallel()
+
+			got := Chunks(test.input, 2)
+
+			if diff := cmp.Diff(got, test.expected); diff != "" {
+				t.Errorf("unexpected diff (+wanted, -got): %s", diff)
+			}
+		})
+	}
+}
+
 func TestFilter(t *testing.T) {
 	t.Parallel()
 
diff --git a/skaff/go.mod b/skaff/go.mod
index 7a6fb532491..ee72fb23b43 100644
--- a/skaff/go.mod
+++ b/skaff/go.mod
@@ -1,6 +1,6 @@
 module github.com/hashicorp/terraform-provider-aws/skaff
 
-go 1.23.0
+go 1.22.6
 
 require (
 	github.com/YakDriver/regexache v0.24.0
diff --git a/tools/awssdkpatch/go.mod b/tools/awssdkpatch/go.mod
index e0ecf587abe..809a5404d59 100644
--- a/tools/awssdkpatch/go.mod
+++ b/tools/awssdkpatch/go.mod
@@ -1,6 +1,6 @@
 module github.com/hashicorp/terraform-provider-aws/tools/awssdkpatch
 
-go 1.23.0
+go 1.22.6
 
 require (
 	github.com/hashicorp/terraform-provider-aws v1.60.1-0.20220322001452-8f7a597d0c24
diff --git a/tools/tfsdk2fw/go.mod b/tools/tfsdk2fw/go.mod
index ceb2a351bd9..d3096c4fca0 100644
--- a/tools/tfsdk2fw/go.mod
+++ b/tools/tfsdk2fw/go.mod
@@ -1,6 +1,6 @@
 module github.com/hashicorp/terraform-provider-aws/tools/tfsdk2fw
 
-go 1.23.0
+go 1.22.6
 
 require (
 	github.com/hashicorp/terraform-plugin-sdk/v2 v2.34.0
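For context on the mechanical rewrite repeated across the service packages above: Go 1.23's `slices.Chunk` returns an iterator (`iter.Seq`), consumed as `for chunk := range slices.Chunk(...)`, whereas the provider's own `tfslices.Chunks` eagerly builds a `[][]T`, consumed as `for _, chunk := range tfslices.Chunks(...)`, which is why every call site also swaps its loop header. The sketch below is illustrative only and not part of the patch; it inlines the `Chunks` helper verbatim from `internal/slices/slices.go` above into a standalone program, with the `main` scaffolding and the sample `ids` slice invented for the example.

```go
// Standalone illustration only (not part of the diff): Chunks is copied
// verbatim from internal/slices/slices.go; package main and the sample data
// exist purely so the example runs.
package main

import "fmt"

// Chunks splits s into consecutive sub-slices of at most size elements.
func Chunks[S ~[]E, E any](s S, size int) []S {
	chunks := make([]S, 0)

	for i := 0; i < len(s); i += size {
		end := i + size

		if end > len(s) {
			end = len(s)
		}

		chunks = append(chunks, s[i:end])
	}

	return chunks
}

func main() {
	ids := []string{"i-1", "i-2", "i-3", "i-4", "i-5"}

	// Go 1.22-compatible pattern used throughout the patch: range over a
	// materialized [][]string instead of ranging over a Go 1.23 iterator.
	for _, chunk := range Chunks(ids, 2) {
		fmt.Println(chunk)
	}
	// Output:
	// [i-1 i-2]
	// [i-3 i-4]
	// [i-5]
}
```

At the batch sizes involved here (10 to 100 items per API call), materializing all chunks up front instead of streaming them lazily has no practical cost, which is what makes the helper a near drop-in replacement while the provider builds with Go `1.22.6`.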