diff --git a/.changelog/26964.txt b/.changelog/26964.txt new file mode 100644 index 00000000000..33ae415f8c8 --- /dev/null +++ b/.changelog/26964.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_kinesis_firehose_delivery_stream: Fix `InvalidArgumentException: Both BufferSizeInMBs and BufferIntervalInSeconds are required to configure buffering for lambda processor` errors on resource Update +``` \ No newline at end of file diff --git a/.changelog/35137.txt b/.changelog/35137.txt new file mode 100644 index 00000000000..e296f915cd1 --- /dev/null +++ b/.changelog/35137.txt @@ -0,0 +1,15 @@ +```release-note:bug +resource/aws_kinesis_firehose_delivery_stream: Fix perpetual `extended_s3_configuration.processing_configuration.processors.parameters` diffs when processor type is `Lambda` +``` + +```release-note:enhancement +resource/aws_kinesis_firehose_delivery_stream: Add `opensearch_configuration.document_id_options` configuration block +``` + +```release-note:enhancement +resource/aws_kinesis_firehose_delivery_stream: Adjust `elasticsearch_configuration.buffering_interval`, `http_endpoint_configuration.buffering_interval`, `opensearch_configuration.buffering_interval`, `opensearchserverless_configuration.buffering_interval`, `redshift_configuration.s3_backup_configuration.buffering_interval`, `extended_s3_configuration.s3_backup_configuration.buffering_interval`, `elasticsearch_configuration.s3_configuration.buffering_interval`, `http_endpoint_configuration.s3_configuration.buffering_interval`, `opensearch_configuration.s3_configuration.buffering_interval`, `opensearchserverless_configuration.s3_configuration.buffering_interval`, `redshift_configuration.s3_configuration.buffering_interval` and `splunk_configuration.s3_configuration.buffering_interval` minimum values to `0` to support zero buffering +``` + +```release-note:enhancement +resource/aws_kinesis_firehose_delivery_stream: Add `splunk_configuration.buffering_interval` and `splunk_configuration.buffering_size` arguments +``` \ No newline at end of file diff --git a/go.mod b/go.mod index ce7dd58b687..5ddfc6129cd 100644 --- a/go.mod +++ b/go.mod @@ -46,6 +46,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/emrserverless v1.14.7 github.com/aws/aws-sdk-go-v2/service/evidently v1.16.6 github.com/aws/aws-sdk-go-v2/service/finspace v1.20.1 + github.com/aws/aws-sdk-go-v2/service/firehose v1.23.2 github.com/aws/aws-sdk-go-v2/service/fis v1.21.6 github.com/aws/aws-sdk-go-v2/service/glacier v1.19.6 github.com/aws/aws-sdk-go-v2/service/groundstation v1.23.6 diff --git a/go.sum b/go.sum index 58e145c523b..4672abd3810 100644 --- a/go.sum +++ b/go.sum @@ -117,6 +117,8 @@ github.com/aws/aws-sdk-go-v2/service/evidently v1.16.6 h1:OAK2QI3q70LnEDGppAgtGY github.com/aws/aws-sdk-go-v2/service/evidently v1.16.6/go.mod h1:Axb/eiAqyS32/FXF96ED2fau3tVbp7cdfIwL7r+y7jE= github.com/aws/aws-sdk-go-v2/service/finspace v1.20.1 h1:J/9RScsjJllziiW4MxN++QO3fr/Tlux6Qsb/CefK1o8= github.com/aws/aws-sdk-go-v2/service/finspace v1.20.1/go.mod h1:M/o8wqNpVcLTZ24lhee1rERp4hR2TUecTkh8oy1Im2o= +github.com/aws/aws-sdk-go-v2/service/firehose v1.23.2 h1:6qZsXAR0nIj+X8L0hVb8ENu9B00oZq19v9ZGYPUB3OE= +github.com/aws/aws-sdk-go-v2/service/firehose v1.23.2/go.mod h1:vHumFD15AwENJSM3SsWzcPpMK24s/7vGN1Xp5rLguz0= github.com/aws/aws-sdk-go-v2/service/fis v1.21.6 h1:3Gyxdj2gBypMNUG1E4ZJLKPyfrF47O3dL/Vo5gABh2I= github.com/aws/aws-sdk-go-v2/service/fis v1.21.6/go.mod h1:JBXrmSMlkws/lJX/W0g6nJeVgrCHUfbqDJEOI7+ga54=
github.com/aws/aws-sdk-go-v2/service/glacier v1.19.6 h1:BzVx19YEwGRxXQaUYfRettlYVEEPN4nVK8CTyf+CI9A= diff --git a/internal/conns/awsclient_gen.go b/internal/conns/awsclient_gen.go index 5f54bb13cfc..1695bb17db5 100644 --- a/internal/conns/awsclient_gen.go +++ b/internal/conns/awsclient_gen.go @@ -40,6 +40,7 @@ import ( emrserverless_sdkv2 "github.com/aws/aws-sdk-go-v2/service/emrserverless" evidently_sdkv2 "github.com/aws/aws-sdk-go-v2/service/evidently" finspace_sdkv2 "github.com/aws/aws-sdk-go-v2/service/finspace" + firehose_sdkv2 "github.com/aws/aws-sdk-go-v2/service/firehose" fis_sdkv2 "github.com/aws/aws-sdk-go-v2/service/fis" glacier_sdkv2 "github.com/aws/aws-sdk-go-v2/service/glacier" groundstation_sdkv2 "github.com/aws/aws-sdk-go-v2/service/groundstation" @@ -163,7 +164,6 @@ import ( emr_sdkv1 "github.com/aws/aws-sdk-go/service/emr" emrcontainers_sdkv1 "github.com/aws/aws-sdk-go/service/emrcontainers" eventbridge_sdkv1 "github.com/aws/aws-sdk-go/service/eventbridge" - firehose_sdkv1 "github.com/aws/aws-sdk-go/service/firehose" fms_sdkv1 "github.com/aws/aws-sdk-go/service/fms" fsx_sdkv1 "github.com/aws/aws-sdk-go/service/fsx" gamelift_sdkv1 "github.com/aws/aws-sdk-go/service/gamelift" @@ -634,8 +634,8 @@ func (c *AWSClient) FinSpaceClient(ctx context.Context) *finspace_sdkv2.Client { return errs.Must(client[*finspace_sdkv2.Client](ctx, c, names.FinSpace, make(map[string]any))) } -func (c *AWSClient) FirehoseConn(ctx context.Context) *firehose_sdkv1.Firehose { - return errs.Must(conn[*firehose_sdkv1.Firehose](ctx, c, names.Firehose, make(map[string]any))) +func (c *AWSClient) FirehoseClient(ctx context.Context) *firehose_sdkv2.Client { + return errs.Must(client[*firehose_sdkv2.Client](ctx, c, names.Firehose, make(map[string]any))) } func (c *AWSClient) GameLiftConn(ctx context.Context) *gamelift_sdkv1.GameLift { diff --git a/internal/service/firehose/delivery_stream.go b/internal/service/firehose/delivery_stream.go index 46af7168129..5b2d09cf744 100644 --- a/internal/service/firehose/delivery_stream.go +++ b/internal/service/firehose/delivery_stream.go @@ -11,35 +11,41 @@ import ( "time" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/arn" - "github.com/aws/aws-sdk-go/service/firehose" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/arn" + "github.com/aws/aws-sdk-go-v2/service/firehose" + "github.com/aws/aws-sdk-go-v2/service/firehose/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" + "golang.org/x/exp/slices" ) +type
destinationType string + const ( - destinationTypeElasticsearch = "elasticsearch" - destinationTypeExtendedS3 = "extended_s3" - destinationTypeHTTPEndpoint = "http_endpoint" - destinationTypeOpenSearch = "opensearch" - destinationTypeOpenSearchServerless = "opensearchserverless" - destinationTypeRedshift = "redshift" - destinationTypeSplunk = "splunk" + destinationTypeElasticsearch destinationType = "elasticsearch" + destinationTypeExtendedS3 destinationType = "extended_s3" + destinationTypeHTTPEndpoint destinationType = "http_endpoint" + destinationTypeOpenSearch destinationType = "opensearch" + destinationTypeOpenSearchServerless destinationType = "opensearchserverless" + destinationTypeRedshift destinationType = "redshift" + destinationTypeSplunk destinationType = "splunk" ) -func destinationType_Values() []string { - return []string{ +func (destinationType) Values() []destinationType { + return []destinationType{ destinationTypeElasticsearch, destinationTypeExtendedS3, destinationTypeHTTPEndpoint, @@ -52,7 +58,7 @@ func destinationType_Values() []string { // @SDKResource("aws_kinesis_firehose_delivery_stream", name="Delivery Stream") // @Tags(identifierAttribute="name") -func ResourceDeliveryStream() *schema.Resource { +func resourceDeliveryStream() *schema.Resource { //lintignore:R011 return &schema.Resource{ CreateWithoutTimeout: resourceDeliveryStreamCreate, @@ -152,14 +158,16 @@ func ResourceDeliveryStream() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "parameters": { + // See AWS::KinesisFirehose::DeliveryStream CloudFormation resource schema. + // uniqueItems is true and insertionOrder is true. Type: schema.TypeList, Optional: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "parameter_name": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(firehose.ProcessorParameterName_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[types.ProcessorParameterName](), }, "parameter_value": { Type: schema.TypeString, @@ -170,9 +178,9 @@ func ResourceDeliveryStream() *schema.Resource { }, }, "type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(firehose.ProcessorType_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[types.ProcessorType](), }, }, }, @@ -206,10 +214,10 @@ func ResourceDeliveryStream() *schema.Resource { }, }, "content_encoding": { - Type: schema.TypeString, - Optional: true, - Default: firehose.ContentEncodingNone, - ValidateFunc: validation.StringInSlice(firehose.ContentEncoding_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: types.ContentEncodingNone, + ValidateDiagFunc: enum.Validate[types.ContentEncoding](), }, }, }, @@ -224,10 +232,9 @@ func ResourceDeliveryStream() *schema.Resource { ValidateFunc: verify.ValidARN, }, "buffering_interval": { - Type: schema.TypeInt, - Optional: true, - Default: 300, - ValidateFunc: validation.IntAtLeast(60), + Type: schema.TypeInt, + Optional: true, + Default: 300, }, "buffering_size": { Type: schema.TypeInt, @@ -237,10 +244,10 @@ func ResourceDeliveryStream() *schema.Resource { }, "cloudwatch_logging_options": cloudWatchLoggingOptionsSchema(), "compression_format": { - Type: schema.TypeString, - Optional: true, - Default: firehose.CompressionFormatUncompressed, - ValidateFunc: validation.StringInSlice(firehose.CompressionFormat_Values(), false), + Type: schema.TypeString, + Optional: true, + 
Default: types.CompressionFormatUncompressed, + ValidateDiagFunc: enum.Validate[types.CompressionFormat](), }, "error_output_prefix": { Type: schema.TypeString, @@ -295,7 +302,7 @@ func ResourceDeliveryStream() *schema.Resource { value := v.(string) return strings.ToLower(value) }, - ValidateFunc: validation.StringInSlice(destinationType_Values(), false), + ValidateDiagFunc: enum.Validate[destinationType](), }, "destination_id": { Type: schema.TypeString, @@ -312,7 +319,7 @@ func ResourceDeliveryStream() *schema.Resource { Type: schema.TypeInt, Optional: true, Default: 300, - ValidateFunc: validation.IntBetween(60, 900), + ValidateFunc: validation.IntBetween(0, 900), }, "buffering_size": { Type: schema.TypeInt, @@ -337,10 +344,10 @@ func ResourceDeliveryStream() *schema.Resource { Required: true, }, "index_rotation_period": { - Type: schema.TypeString, - Optional: true, - Default: firehose.ElasticsearchIndexRotationPeriodOneDay, - ValidateFunc: validation.StringInSlice(firehose.ElasticsearchIndexRotationPeriod_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: types.ElasticsearchIndexRotationPeriodOneDay, + ValidateDiagFunc: enum.Validate[types.ElasticsearchIndexRotationPeriod](), }, "processing_configuration": processingConfigurationSchema(), "retry_duration": { @@ -355,11 +362,11 @@ func ResourceDeliveryStream() *schema.Resource { ValidateFunc: verify.ValidARN, }, "s3_backup_mode": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - Default: firehose.ElasticsearchS3BackupModeFailedDocumentsOnly, - ValidateFunc: validation.StringInSlice(firehose.ElasticsearchS3BackupMode_Values(), false), + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Default: types.ElasticsearchS3BackupModeFailedDocumentsOnly, + ValidateDiagFunc: enum.Validate[types.ElasticsearchS3BackupMode](), }, "s3_configuration": s3ConfigurationSchema(), "type_name": { @@ -425,10 +432,10 @@ func ResourceDeliveryStream() *schema.Resource { }, "cloudwatch_logging_options": cloudWatchLoggingOptionsSchema(), "compression_format": { - Type: schema.TypeString, - Optional: true, - Default: firehose.CompressionFormatUncompressed, - ValidateFunc: validation.StringInSlice(firehose.CompressionFormat_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: types.CompressionFormatUncompressed, + ValidateDiagFunc: enum.Validate[types.CompressionFormat](), }, "data_format_conversion_configuration": { Type: schema.TypeList, @@ -537,10 +544,10 @@ func ResourceDeliveryStream() *schema.Resource { Default: 0.05, }, "compression": { - Type: schema.TypeString, - Optional: true, - Default: firehose.OrcCompressionSnappy, - ValidateFunc: validation.StringInSlice(firehose.OrcCompression_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: types.OrcCompressionSnappy, + ValidateDiagFunc: enum.Validate[types.OrcCompression](), }, "dictionary_key_threshold": { Type: schema.TypeFloat, @@ -553,10 +560,10 @@ func ResourceDeliveryStream() *schema.Resource { Default: false, }, "format_version": { - Type: schema.TypeString, - Optional: true, - Default: firehose.OrcFormatVersionV012, - ValidateFunc: validation.StringInSlice(firehose.OrcFormatVersion_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: types.OrcFormatVersionV012, + ValidateDiagFunc: enum.Validate[types.OrcFormatVersion](), }, "padding_tolerance": { Type: schema.TypeFloat, @@ -596,10 +603,10 @@ func ResourceDeliveryStream() *schema.Resource { ValidateFunc: validation.IntAtLeast(67108864), }, 
"compression": { - Type: schema.TypeString, - Optional: true, - Default: firehose.ParquetCompressionSnappy, - ValidateFunc: validation.StringInSlice(firehose.ParquetCompression_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: types.ParquetCompressionSnappy, + ValidateDiagFunc: enum.Validate[types.ParquetCompression](), }, "enable_dictionary_compression": { Type: schema.TypeBool, @@ -620,10 +627,10 @@ func ResourceDeliveryStream() *schema.Resource { ValidateFunc: validation.IntAtLeast(65536), }, "writer_version": { - Type: schema.TypeString, - Optional: true, - Default: firehose.ParquetWriterVersionV1, - ValidateFunc: validation.StringInSlice(firehose.ParquetWriterVersion_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: types.ParquetWriterVersionV1, + ValidateDiagFunc: enum.Validate[types.ParquetWriterVersion](), }, }, }, @@ -697,10 +704,10 @@ func ResourceDeliveryStream() *schema.Resource { }, "s3_backup_configuration": s3BackupConfigurationSchema(), "s3_backup_mode": { - Type: schema.TypeString, - Optional: true, - Default: firehose.S3BackupModeDisabled, - ValidateFunc: validation.StringInSlice(firehose.S3BackupMode_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: types.S3BackupModeDisabled, + ValidateDiagFunc: enum.Validate[types.S3BackupMode](), }, }, }, @@ -721,7 +728,7 @@ func ResourceDeliveryStream() *schema.Resource { Type: schema.TypeInt, Optional: true, Default: 300, - ValidateFunc: validation.IntBetween(60, 900), + ValidateFunc: validation.IntBetween(0, 900), }, "buffering_size": { Type: schema.TypeInt, @@ -751,10 +758,10 @@ func ResourceDeliveryStream() *schema.Resource { ValidateFunc: verify.ValidARN, }, "s3_backup_mode": { - Type: schema.TypeString, - Optional: true, - Default: firehose.HttpEndpointS3BackupModeFailedDataOnly, - ValidateFunc: validation.StringInSlice(firehose.HttpEndpointS3BackupMode_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: types.HttpEndpointS3BackupModeFailedDataOnly, + ValidateDiagFunc: enum.Validate[types.HttpEndpointS3BackupMode](), }, "s3_configuration": s3ConfigurationSchema(), "url": { @@ -807,10 +814,10 @@ func ResourceDeliveryStream() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "connectivity": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice(firehose.Connectivity_Values(), false), + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[types.Connectivity](), }, "role_arn": { Type: schema.TypeString, @@ -851,7 +858,7 @@ func ResourceDeliveryStream() *schema.Resource { Type: schema.TypeInt, Optional: true, Default: 300, - ValidateFunc: validation.IntBetween(60, 900), + ValidateFunc: validation.IntBetween(0, 900), }, "buffering_size": { Type: schema.TypeInt, @@ -865,6 +872,20 @@ func ResourceDeliveryStream() *schema.Resource { Optional: true, ConflictsWith: []string{"opensearch_configuration.0.domain_arn"}, }, + "document_id_options": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "default_document_id_format": { + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[types.DefaultDocumentIdFormat](), + }, + }, + }, + }, "domain_arn": { Type: schema.TypeString, Optional: true, @@ -876,10 +897,10 @@ func ResourceDeliveryStream() *schema.Resource { Required: true, }, "index_rotation_period": { - Type: schema.TypeString, - 
Optional: true, - Default: firehose.AmazonopensearchserviceIndexRotationPeriodOneDay, - ValidateFunc: validation.StringInSlice(firehose.AmazonopensearchserviceIndexRotationPeriod_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: types.AmazonopensearchserviceIndexRotationPeriodOneDay, + ValidateDiagFunc: enum.Validate[types.AmazonopensearchserviceIndexRotationPeriod](), }, "processing_configuration": processingConfigurationSchema(), "retry_duration": { @@ -894,11 +915,11 @@ func ResourceDeliveryStream() *schema.Resource { ValidateFunc: verify.ValidARN, }, "s3_backup_mode": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - Default: firehose.AmazonopensearchserviceS3BackupModeFailedDocumentsOnly, - ValidateFunc: validation.StringInSlice(firehose.AmazonopensearchserviceS3BackupMode_Values(), false), + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Default: types.AmazonopensearchserviceS3BackupModeFailedDocumentsOnly, + ValidateDiagFunc: enum.Validate[types.AmazonopensearchserviceS3BackupMode](), }, "s3_configuration": s3ConfigurationSchema(), "type_name": { @@ -951,7 +972,7 @@ func ResourceDeliveryStream() *schema.Resource { Type: schema.TypeInt, Optional: true, Default: 300, - ValidateFunc: validation.IntBetween(60, 900), + ValidateFunc: validation.IntBetween(0, 900), }, "buffering_size": { Type: schema.TypeInt, @@ -981,11 +1002,11 @@ func ResourceDeliveryStream() *schema.Resource { ValidateFunc: verify.ValidARN, }, "s3_backup_mode": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - Default: firehose.AmazonOpenSearchServerlessS3BackupModeFailedDocumentsOnly, - ValidateFunc: validation.StringInSlice(firehose.AmazonOpenSearchServerlessS3BackupMode_Values(), false), + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Default: types.AmazonOpenSearchServerlessS3BackupModeFailedDocumentsOnly, + ValidateDiagFunc: enum.Validate[types.AmazonOpenSearchServerlessS3BackupMode](), }, "s3_configuration": s3ConfigurationSchema(), "vpc_config": { @@ -1065,10 +1086,10 @@ func ResourceDeliveryStream() *schema.Resource { }, "s3_backup_configuration": s3BackupConfigurationSchema(), "s3_backup_mode": { - Type: schema.TypeString, - Optional: true, - Default: firehose.S3BackupModeDisabled, - ValidateFunc: validation.StringInSlice(firehose.S3BackupMode_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: types.RedshiftS3BackupModeDisabled, + ValidateDiagFunc: enum.Validate[types.RedshiftS3BackupMode](), }, "s3_configuration": s3ConfigurationSchema(), "username": { @@ -1098,11 +1119,11 @@ func ResourceDeliveryStream() *schema.Resource { RequiredWith: []string{"server_side_encryption.0.enabled", "server_side_encryption.0.key_type"}, }, "key_type": { - Type: schema.TypeString, - Optional: true, - Default: firehose.KeyTypeAwsOwnedCmk, - ValidateFunc: validation.StringInSlice(firehose.KeyType_Values(), false), - RequiredWith: []string{"server_side_encryption.0.enabled"}, + Type: schema.TypeString, + Optional: true, + Default: types.KeyTypeAwsOwnedCmk, + ValidateDiagFunc: enum.Validate[types.KeyType](), + RequiredWith: []string{"server_side_encryption.0.enabled"}, }, }, }, @@ -1113,6 +1134,18 @@ func ResourceDeliveryStream() *schema.Resource { MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "buffering_interval": { + Type: schema.TypeInt, + Optional: true, + Default: 60, + ValidateFunc: validation.IntBetween(0, 60), + }, + "buffering_size": { + Type: schema.TypeInt, + Optional: true, + 
Default: 5, + ValidateFunc: validation.IntBetween(1, 5), + }, "cloudwatch_logging_options": cloudWatchLoggingOptionsSchema(), "hec_acknowledgment_timeout": { Type: schema.TypeInt, @@ -1125,10 +1158,10 @@ func ResourceDeliveryStream() *schema.Resource { Required: true, }, "hec_endpoint_type": { - Type: schema.TypeString, - Optional: true, - Default: firehose.HECEndpointTypeRaw, - ValidateFunc: validation.StringInSlice(firehose.HECEndpointType_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: types.HECEndpointTypeRaw, + ValidateDiagFunc: enum.Validate[types.HECEndpointType](), }, "hec_token": { Type: schema.TypeString, @@ -1142,10 +1175,10 @@ func ResourceDeliveryStream() *schema.Resource { ValidateFunc: validation.IntBetween(0, 7200), }, "s3_backup_mode": { - Type: schema.TypeString, - Optional: true, - Default: firehose.SplunkS3BackupModeFailedEventsOnly, - ValidateFunc: validation.StringInSlice(firehose.SplunkS3BackupMode_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: types.SplunkS3BackupModeFailedEventsOnly, + ValidateDiagFunc: enum.Validate[types.SplunkS3BackupMode](), }, "s3_configuration": s3ConfigurationSchema(), }, @@ -1164,8 +1197,8 @@ func ResourceDeliveryStream() *schema.Resource { CustomizeDiff: customdiff.All( verify.SetTagsDiff, func(ctx context.Context, d *schema.ResourceDiff, meta interface{}) error { - destination := d.Get("destination").(string) - requiredAttribute := map[string]string{ + destination := destinationType(d.Get("destination").(string)) + requiredAttribute := map[destinationType]string{ destinationTypeElasticsearch: "elasticsearch_configuration", destinationTypeExtendedS3: "extended_s3_configuration", destinationTypeHTTPEndpoint: "http_endpoint_configuration", @@ -1187,24 +1220,24 @@ func ResourceDeliveryStream() *schema.Resource { func resourceDeliveryStreamCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).FirehoseConn(ctx) + conn := meta.(*conns.AWSClient).FirehoseClient(ctx) sn := d.Get("name").(string) input := &firehose.CreateDeliveryStreamInput{ DeliveryStreamName: aws.String(sn), - DeliveryStreamType: aws.String(firehose.DeliveryStreamTypeDirectPut), + DeliveryStreamType: types.DeliveryStreamTypeDirectPut, Tags: getTagsIn(ctx), } if v, ok := d.GetOk("kinesis_source_configuration"); ok { - input.DeliveryStreamType = aws.String(firehose.DeliveryStreamTypeKinesisStreamAsSource) + input.DeliveryStreamType = types.DeliveryStreamTypeKinesisStreamAsSource input.KinesisStreamSourceConfiguration = expandKinesisStreamSourceConfiguration(v.([]interface{})[0].(map[string]interface{})) } else if v, ok := d.GetOk("msk_source_configuration"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { - input.DeliveryStreamType = aws.String(firehose.DeliveryStreamTypeMskasSource) + input.DeliveryStreamType = types.DeliveryStreamTypeMSKAsSource input.MSKSourceConfiguration = expandMSKSourceConfiguration(v.([]interface{})[0].(map[string]interface{})) } - switch d.Get("destination").(string) { + switch v := destinationType(d.Get("destination").(string)); v { case destinationTypeElasticsearch: if v, ok := d.GetOk("elasticsearch_configuration"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { input.ElasticsearchDestinationConfiguration = expandElasticsearchDestinationConfiguration(v.([]interface{})[0].(map[string]interface{})) @@ -1236,7 +1269,7 @@ func resourceDeliveryStreamCreate(ctx 
context.Context, d *schema.ResourceData, m } _, err := retryDeliveryStreamOp(ctx, func() (interface{}, error) { - return conn.CreateDeliveryStreamWithContext(ctx, input) + return conn.CreateDeliveryStream(ctx, input) }) if err != nil { @@ -1249,7 +1282,7 @@ func resourceDeliveryStreamCreate(ctx context.Context, d *schema.ResourceData, m return sdkdiag.AppendErrorf(diags, "waiting for Kinesis Firehose Delivery Stream (%s) create: %s", sn, err) } - d.SetId(aws.StringValue(output.DeliveryStreamARN)) + d.SetId(aws.ToString(output.DeliveryStreamARN)) if v, ok := d.GetOk("server_side_encryption"); ok && !isDeliveryStreamOptionDisabled(v) { input := &firehose.StartDeliveryStreamEncryptionInput{ @@ -1257,7 +1290,7 @@ func resourceDeliveryStreamCreate(ctx context.Context, d *schema.ResourceData, m DeliveryStreamName: aws.String(sn), } - _, err := conn.StartDeliveryStreamEncryptionWithContext(ctx, input) + _, err := conn.StartDeliveryStreamEncryption(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "starting Kinesis Firehose Delivery Stream (%s) encryption: %s", sn, err) @@ -1273,10 +1306,10 @@ func resourceDeliveryStreamCreate(ctx context.Context, d *schema.ResourceData, m func resourceDeliveryStreamRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).FirehoseConn(ctx) + conn := meta.(*conns.AWSClient).FirehoseClient(ctx) sn := d.Get("name").(string) - s, err := FindDeliveryStreamByName(ctx, conn, sn) + s, err := findDeliveryStreamByName(ctx, conn, sn) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] Kinesis Firehose Delivery Stream (%s) not found, removing from state", d.Id()) @@ -1306,16 +1339,14 @@ func resourceDeliveryStreamRead(ctx context.Context, d *schema.ResourceData, met sseOptions := map[string]interface{}{ "enabled": false, - "key_type": firehose.KeyTypeAwsOwnedCmk, + "key_type": types.KeyTypeAwsOwnedCmk, } - if s.DeliveryStreamEncryptionConfiguration != nil && aws.StringValue(s.DeliveryStreamEncryptionConfiguration.Status) == firehose.DeliveryStreamEncryptionStatusEnabled { + if s.DeliveryStreamEncryptionConfiguration != nil && s.DeliveryStreamEncryptionConfiguration.Status == types.DeliveryStreamEncryptionStatusEnabled { sseOptions["enabled"] = true + sseOptions["key_type"] = s.DeliveryStreamEncryptionConfiguration.KeyType if v := s.DeliveryStreamEncryptionConfiguration.KeyARN; v != nil { - sseOptions["key_arn"] = aws.StringValue(v) - } - if v := s.DeliveryStreamEncryptionConfiguration.KeyType; v != nil { - sseOptions["key_type"] = aws.StringValue(v) + sseOptions["key_arn"] = aws.ToString(v) } } if err := d.Set("server_side_encryption", []map[string]interface{}{sseOptions}); err != nil { @@ -1371,7 +1402,7 @@ func resourceDeliveryStreamRead(ctx context.Context, d *schema.ResourceData, met func resourceDeliveryStreamUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).FirehoseConn(ctx) + conn := meta.(*conns.AWSClient).FirehoseClient(ctx) sn := d.Get("name").(string) @@ -1382,7 +1413,7 @@ func resourceDeliveryStreamUpdate(ctx context.Context, d *schema.ResourceData, m DestinationId: aws.String(d.Get("destination_id").(string)), } - switch d.Get("destination").(string) { + switch v := destinationType(d.Get("destination").(string)); v { case destinationTypeElasticsearch: if v, ok := d.GetOk("elasticsearch_configuration"); ok && len(v.([]interface{})) > 0 && 
v.([]interface{})[0] != nil { input.ElasticsearchDestinationUpdate = expandElasticsearchDestinationUpdate(v.([]interface{})[0].(map[string]interface{})) @@ -1414,7 +1445,7 @@ func resourceDeliveryStreamUpdate(ctx context.Context, d *schema.ResourceData, m } _, err := retryDeliveryStreamOp(ctx, func() (interface{}, error) { - return conn.UpdateDestinationWithContext(ctx, input) + return conn.UpdateDestination(ctx, input) }) if err != nil { @@ -1429,7 +1460,7 @@ func resourceDeliveryStreamUpdate(ctx context.Context, d *schema.ResourceData, m DeliveryStreamName: aws.String(sn), } - _, err := conn.StopDeliveryStreamEncryptionWithContext(ctx, input) + _, err := conn.StopDeliveryStreamEncryption(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "stopping Kinesis Firehose Delivery Stream (%s) encryption: %s", sn, err) @@ -1444,7 +1475,7 @@ func resourceDeliveryStreamUpdate(ctx context.Context, d *schema.ResourceData, m DeliveryStreamName: aws.String(sn), } - _, err := conn.StartDeliveryStreamEncryptionWithContext(ctx, input) + _, err := conn.StartDeliveryStreamEncryption(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "starting Kinesis Firehose Delivery Stream (%s) encryption: %s", sn, err) @@ -1461,16 +1492,16 @@ func resourceDeliveryStreamUpdate(ctx context.Context, d *schema.ResourceData, m func resourceDeliveryStreamDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).FirehoseConn(ctx) + conn := meta.(*conns.AWSClient).FirehoseClient(ctx) sn := d.Get("name").(string) log.Printf("[DEBUG] Deleting Kinesis Firehose Delivery Stream: (%s)", sn) - _, err := conn.DeleteDeliveryStreamWithContext(ctx, &firehose.DeleteDeliveryStreamInput{ + _, err := conn.DeleteDeliveryStream(ctx, &firehose.DeleteDeliveryStreamInput{ DeliveryStreamName: aws.String(sn), }) - if tfawserr.ErrCodeEquals(err, firehose.ErrCodeResourceNotFoundException) { + if errs.IsA[*types.ResourceNotFoundException](err) { return diags } @@ -1490,20 +1521,20 @@ func retryDeliveryStreamOp(ctx context.Context, f func() (interface{}, error)) ( f, func(err error) (bool, error) { // Access was denied when calling Glue. Please ensure that the role specified in the data format conversion configuration has the necessary permissions. - if tfawserr.ErrMessageContains(err, firehose.ErrCodeInvalidArgumentException, "Access was denied") { + if errs.IsAErrorMessageContains[*types.InvalidArgumentException](err, "Access was denied") { return true, err } - if tfawserr.ErrMessageContains(err, firehose.ErrCodeInvalidArgumentException, "is not authorized to") { + if errs.IsAErrorMessageContains[*types.InvalidArgumentException](err, "is not authorized to") { return true, err } - if tfawserr.ErrMessageContains(err, firehose.ErrCodeInvalidArgumentException, "Please make sure the role specified in VpcConfiguration has permissions") { + if errs.IsAErrorMessageContains[*types.InvalidArgumentException](err, "Please make sure the role specified in VpcConfiguration has permissions") { return true, err } // InvalidArgumentException: Verify that the IAM role has access to the Elasticsearch domain. 
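// Editorial note (assumption): retrying on these InvalidArgumentException messages is a hedge against IAM eventual consistency — a role or policy created in the same apply may not yet be visible to Firehose, so the operation is retried until the permissions propagate.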
- if tfawserr.ErrMessageContains(err, firehose.ErrCodeInvalidArgumentException, "Verify that the IAM role has access") { + if errs.IsAErrorMessageContains[*types.InvalidArgumentException](err, "Verify that the IAM role has access") { return true, err } - if tfawserr.ErrMessageContains(err, firehose.ErrCodeInvalidArgumentException, "Firehose is unable to assume role") { + if errs.IsAErrorMessageContains[*types.InvalidArgumentException](err, "Firehose is unable to assume role") { return true, err } return false, err @@ -1511,8 +1542,163 @@ func retryDeliveryStreamOp(ctx context.Context, f func() (interface{}, error)) ( ) } -func expandKinesisStreamSourceConfiguration(source map[string]interface{}) *firehose.KinesisStreamSourceConfiguration { - configuration := &firehose.KinesisStreamSourceConfiguration{ +func findDeliveryStreamByName(ctx context.Context, conn *firehose.Client, name string) (*types.DeliveryStreamDescription, error) { + input := &firehose.DescribeDeliveryStreamInput{ + DeliveryStreamName: aws.String(name), + } + + output, err := conn.DescribeDeliveryStream(ctx, input) + + if errs.IsA[*types.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil || output.DeliveryStreamDescription == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output.DeliveryStreamDescription, nil +} + +func statusDeliveryStream(ctx context.Context, conn *firehose.Client, name string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findDeliveryStreamByName(ctx, conn, name) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, string(output.DeliveryStreamStatus), nil + } +} + +func waitDeliveryStreamCreated(ctx context.Context, conn *firehose.Client, name string, timeout time.Duration) (*types.DeliveryStreamDescription, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(types.DeliveryStreamStatusCreating), + Target: enum.Slice(types.DeliveryStreamStatusActive), + Refresh: statusDeliveryStream(ctx, conn, name), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*types.DeliveryStreamDescription); ok { + if status, failureDescription := output.DeliveryStreamStatus, output.FailureDescription; status == types.DeliveryStreamStatusCreatingFailed && failureDescription != nil { + tfresource.SetLastError(err, fmt.Errorf("%s: %s", failureDescription.Type, aws.ToString(failureDescription.Details))) + } + + return output, err + } + + return nil, err +} + +func waitDeliveryStreamDeleted(ctx context.Context, conn *firehose.Client, name string, timeout time.Duration) (*types.DeliveryStreamDescription, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(types.DeliveryStreamStatusDeleting), + Target: []string{}, + Refresh: statusDeliveryStream(ctx, conn, name), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*types.DeliveryStreamDescription); ok { + if status, failureDescription := output.DeliveryStreamStatus, output.FailureDescription; status == types.DeliveryStreamStatusDeletingFailed && failureDescription != nil { + tfresource.SetLastError(err, fmt.Errorf("%s: %s", failureDescription.Type, aws.ToString(failureDescription.Details))) + } + + return output, err + } + + return nil, err +} + +func 
findDeliveryStreamEncryptionConfigurationByName(ctx context.Context, conn *firehose.Client, name string) (*types.DeliveryStreamEncryptionConfiguration, error) { + output, err := findDeliveryStreamByName(ctx, conn, name) + + if err != nil { + return nil, err + } + + if output.DeliveryStreamEncryptionConfiguration == nil { + return nil, tfresource.NewEmptyResultError(nil) + } + + return output.DeliveryStreamEncryptionConfiguration, nil +} + +func statusDeliveryStreamEncryptionConfiguration(ctx context.Context, conn *firehose.Client, name string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findDeliveryStreamEncryptionConfigurationByName(ctx, conn, name) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, string(output.Status), nil + } +} + +func waitDeliveryStreamEncryptionEnabled(ctx context.Context, conn *firehose.Client, name string, timeout time.Duration) (*types.DeliveryStreamEncryptionConfiguration, error) { //nolint:unparam + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(types.DeliveryStreamEncryptionStatusEnabling), + Target: enum.Slice(types.DeliveryStreamEncryptionStatusEnabled), + Refresh: statusDeliveryStreamEncryptionConfiguration(ctx, conn, name), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*types.DeliveryStreamEncryptionConfiguration); ok { + if status, failureDescription := output.Status, output.FailureDescription; status == types.DeliveryStreamEncryptionStatusEnablingFailed && failureDescription != nil { + tfresource.SetLastError(err, fmt.Errorf("%s: %s", failureDescription.Type, aws.ToString(failureDescription.Details))) + } + + return output, err + } + + return nil, err +} + +func waitDeliveryStreamEncryptionDisabled(ctx context.Context, conn *firehose.Client, name string, timeout time.Duration) (*types.DeliveryStreamEncryptionConfiguration, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(types.DeliveryStreamEncryptionStatusDisabling), + Target: enum.Slice(types.DeliveryStreamEncryptionStatusDisabled), + Refresh: statusDeliveryStreamEncryptionConfiguration(ctx, conn, name), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*types.DeliveryStreamEncryptionConfiguration); ok { + if status, failureDescription := output.Status, output.FailureDescription; status == types.DeliveryStreamEncryptionStatusDisablingFailed && failureDescription != nil { + tfresource.SetLastError(err, fmt.Errorf("%s: %s", failureDescription.Type, aws.ToString(failureDescription.Details))) + } + + return output, err + } + + return nil, err +} + +func expandKinesisStreamSourceConfiguration(source map[string]interface{}) *types.KinesisStreamSourceConfiguration { + configuration := &types.KinesisStreamSourceConfiguration{ KinesisStreamARN: aws.String(source["kinesis_stream_arn"].(string)), RoleARN: aws.String(source["role_arn"].(string)), } @@ -1520,18 +1706,18 @@ func expandKinesisStreamSourceConfiguration(source map[string]interface{}) *fire return configuration } -func expandS3DestinationConfiguration(tfList []interface{}) *firehose.S3DestinationConfiguration { +func expandS3DestinationConfiguration(tfList []interface{}) *types.S3DestinationConfiguration { s3 := tfList[0].(map[string]interface{}) - configuration := &firehose.S3DestinationConfiguration{ + configuration := &types.S3DestinationConfiguration{ BucketARN: 
aws.String(s3["bucket_arn"].(string)), RoleARN: aws.String(s3["role_arn"].(string)), - BufferingHints: &firehose.BufferingHints{ - IntervalInSeconds: aws.Int64(int64(s3["buffering_interval"].(int))), - SizeInMBs: aws.Int64(int64(s3["buffering_size"].(int))), + BufferingHints: &types.BufferingHints{ + IntervalInSeconds: aws.Int32(int32(s3["buffering_interval"].(int))), + SizeInMBs: aws.Int32(int32(s3["buffering_size"].(int))), }, Prefix: expandPrefix(s3), - CompressionFormat: aws.String(s3["compression_format"].(string)), + CompressionFormat: types.CompressionFormat(s3["compression_format"].(string)), EncryptionConfiguration: expandEncryptionConfiguration(s3), } @@ -1546,7 +1732,7 @@ func expandS3DestinationConfiguration(tfList []interface{}) *firehose.S3Destinat return configuration } -func expandS3DestinationConfigurationBackup(d map[string]interface{}) *firehose.S3DestinationConfiguration { +func expandS3DestinationConfigurationBackup(d map[string]interface{}) *types.S3DestinationConfiguration { config := d["s3_backup_configuration"].([]interface{}) if len(config) == 0 { return nil @@ -1554,15 +1740,15 @@ func expandS3DestinationConfigurationBackup(d map[string]interface{}) *firehose. s3 := config[0].(map[string]interface{}) - configuration := &firehose.S3DestinationConfiguration{ + configuration := &types.S3DestinationConfiguration{ BucketARN: aws.String(s3["bucket_arn"].(string)), RoleARN: aws.String(s3["role_arn"].(string)), - BufferingHints: &firehose.BufferingHints{ - IntervalInSeconds: aws.Int64(int64(s3["buffering_interval"].(int))), - SizeInMBs: aws.Int64(int64(s3["buffering_size"].(int))), + BufferingHints: &types.BufferingHints{ + IntervalInSeconds: aws.Int32(int32(s3["buffering_interval"].(int))), + SizeInMBs: aws.Int32(int32(s3["buffering_size"].(int))), }, Prefix: expandPrefix(s3), - CompressionFormat: aws.String(s3["compression_format"].(string)), + CompressionFormat: types.CompressionFormat(s3["compression_format"].(string)), EncryptionConfiguration: expandEncryptionConfiguration(s3), } @@ -1577,22 +1763,23 @@ func expandS3DestinationConfigurationBackup(d map[string]interface{}) *firehose. 
return configuration } -func expandExtendedS3DestinationConfiguration(s3 map[string]interface{}) *firehose.ExtendedS3DestinationConfiguration { - configuration := &firehose.ExtendedS3DestinationConfiguration{ +func expandExtendedS3DestinationConfiguration(s3 map[string]interface{}) *types.ExtendedS3DestinationConfiguration { + roleARN := s3["role_arn"].(string) + configuration := &types.ExtendedS3DestinationConfiguration{ BucketARN: aws.String(s3["bucket_arn"].(string)), - RoleARN: aws.String(s3["role_arn"].(string)), - BufferingHints: &firehose.BufferingHints{ - IntervalInSeconds: aws.Int64(int64(s3["buffering_interval"].(int))), - SizeInMBs: aws.Int64(int64(s3["buffering_size"].(int))), + RoleARN: aws.String(roleARN), + BufferingHints: &types.BufferingHints{ + IntervalInSeconds: aws.Int32(int32(s3["buffering_interval"].(int))), + SizeInMBs: aws.Int32(int32(s3["buffering_size"].(int))), }, Prefix: expandPrefix(s3), - CompressionFormat: aws.String(s3["compression_format"].(string)), + CompressionFormat: types.CompressionFormat(s3["compression_format"].(string)), DataFormatConversionConfiguration: expandDataFormatConversionConfiguration(s3["data_format_conversion_configuration"].([]interface{})), EncryptionConfiguration: expandEncryptionConfiguration(s3), } if _, ok := s3["processing_configuration"]; ok { - configuration.ProcessingConfiguration = expandProcessingConfiguration(s3) + configuration.ProcessingConfiguration = expandProcessingConfiguration(s3, destinationTypeExtendedS3, roleARN) } if _, ok := s3["dynamic_partitioning_configuration"]; ok { @@ -1608,25 +1795,25 @@ func expandExtendedS3DestinationConfiguration(s3 map[string]interface{}) *fireho } if s3BackupMode, ok := s3["s3_backup_mode"]; ok { - configuration.S3BackupMode = aws.String(s3BackupMode.(string)) + configuration.S3BackupMode = types.S3BackupMode(s3BackupMode.(string)) configuration.S3BackupConfiguration = expandS3DestinationConfigurationBackup(s3) } return configuration } -func expandS3DestinationUpdate(tfList []interface{}) *firehose.S3DestinationUpdate { +func expandS3DestinationUpdate(tfList []interface{}) *types.S3DestinationUpdate { s3 := tfList[0].(map[string]interface{}) - configuration := &firehose.S3DestinationUpdate{ + configuration := &types.S3DestinationUpdate{ BucketARN: aws.String(s3["bucket_arn"].(string)), RoleARN: aws.String(s3["role_arn"].(string)), - BufferingHints: &firehose.BufferingHints{ - IntervalInSeconds: aws.Int64((int64)(s3["buffering_interval"].(int))), - SizeInMBs: aws.Int64((int64)(s3["buffering_size"].(int))), + BufferingHints: &types.BufferingHints{ + IntervalInSeconds: aws.Int32(int32(s3["buffering_interval"].(int))), + SizeInMBs: aws.Int32(int32(s3["buffering_size"].(int))), }, ErrorOutputPrefix: aws.String(s3["error_output_prefix"].(string)), Prefix: expandPrefix(s3), - CompressionFormat: aws.String(s3["compression_format"].(string)), + CompressionFormat: types.CompressionFormat(s3["compression_format"].(string)), EncryptionConfiguration: expandEncryptionConfiguration(s3), CloudWatchLoggingOptions: expandCloudWatchLoggingOptions(s3), } @@ -1638,7 +1825,7 @@ func expandS3DestinationUpdate(tfList []interface{}) *firehose.S3DestinationUpda return configuration } -func expandS3DestinationUpdateBackup(d map[string]interface{}) *firehose.S3DestinationUpdate { +func expandS3DestinationUpdateBackup(d map[string]interface{}) *types.S3DestinationUpdate { config := d["s3_backup_configuration"].([]interface{}) if len(config) == 0 { return nil @@ -1646,16 +1833,16 @@ func 
expandS3DestinationUpdateBackup(d map[string]interface{}) *firehose.S3Desti s3 := config[0].(map[string]interface{}) - configuration := &firehose.S3DestinationUpdate{ + configuration := &types.S3DestinationUpdate{ BucketARN: aws.String(s3["bucket_arn"].(string)), RoleARN: aws.String(s3["role_arn"].(string)), - BufferingHints: &firehose.BufferingHints{ - IntervalInSeconds: aws.Int64((int64)(s3["buffering_interval"].(int))), - SizeInMBs: aws.Int64((int64)(s3["buffering_size"].(int))), + BufferingHints: &types.BufferingHints{ + IntervalInSeconds: aws.Int32(int32(s3["buffering_interval"].(int))), + SizeInMBs: aws.Int32(int32(s3["buffering_size"].(int))), }, ErrorOutputPrefix: aws.String(s3["error_output_prefix"].(string)), Prefix: expandPrefix(s3), - CompressionFormat: aws.String(s3["compression_format"].(string)), + CompressionFormat: types.CompressionFormat(s3["compression_format"].(string)), EncryptionConfiguration: expandEncryptionConfiguration(s3), CloudWatchLoggingOptions: expandCloudWatchLoggingOptions(s3), } @@ -1667,21 +1854,22 @@ func expandS3DestinationUpdateBackup(d map[string]interface{}) *firehose.S3Desti return configuration } -func expandExtendedS3DestinationUpdate(s3 map[string]interface{}) *firehose.ExtendedS3DestinationUpdate { - configuration := &firehose.ExtendedS3DestinationUpdate{ +func expandExtendedS3DestinationUpdate(s3 map[string]interface{}) *types.ExtendedS3DestinationUpdate { + roleARN := s3["role_arn"].(string) + configuration := &types.ExtendedS3DestinationUpdate{ BucketARN: aws.String(s3["bucket_arn"].(string)), - RoleARN: aws.String(s3["role_arn"].(string)), - BufferingHints: &firehose.BufferingHints{ - IntervalInSeconds: aws.Int64((int64)(s3["buffering_interval"].(int))), - SizeInMBs: aws.Int64((int64)(s3["buffering_size"].(int))), + RoleARN: aws.String(roleARN), + BufferingHints: &types.BufferingHints{ + IntervalInSeconds: aws.Int32(int32(s3["buffering_interval"].(int))), + SizeInMBs: aws.Int32(int32(s3["buffering_size"].(int))), }, ErrorOutputPrefix: aws.String(s3["error_output_prefix"].(string)), Prefix: expandPrefix(s3), - CompressionFormat: aws.String(s3["compression_format"].(string)), + CompressionFormat: types.CompressionFormat(s3["compression_format"].(string)), EncryptionConfiguration: expandEncryptionConfiguration(s3), DataFormatConversionConfiguration: expandDataFormatConversionConfiguration(s3["data_format_conversion_configuration"].([]interface{})), CloudWatchLoggingOptions: expandCloudWatchLoggingOptions(s3), - ProcessingConfiguration: expandProcessingConfiguration(s3), + ProcessingConfiguration: expandProcessingConfiguration(s3, destinationTypeExtendedS3, roleARN), } if _, ok := s3["cloudwatch_logging_options"]; ok { @@ -1693,25 +1881,25 @@ func expandExtendedS3DestinationUpdate(s3 map[string]interface{}) *firehose.Exte } if s3BackupMode, ok := s3["s3_backup_mode"]; ok { - configuration.S3BackupMode = aws.String(s3BackupMode.(string)) + configuration.S3BackupMode = types.S3BackupMode(s3BackupMode.(string)) configuration.S3BackupUpdate = expandS3DestinationUpdateBackup(s3) } return configuration } -func expandDataFormatConversionConfiguration(l []interface{}) *firehose.DataFormatConversionConfiguration { +func expandDataFormatConversionConfiguration(l []interface{}) *types.DataFormatConversionConfiguration { if len(l) == 0 || l[0] == nil { // It is possible to just pass nil here, but this seems to be the // canonical form that AWS uses, and is less likely to produce diffs. 
- return &firehose.DataFormatConversionConfiguration{ + return &types.DataFormatConversionConfiguration{ Enabled: aws.Bool(false), } } m := l[0].(map[string]interface{}) - return &firehose.DataFormatConversionConfiguration{ + return &types.DataFormatConversionConfiguration{ Enabled: aws.Bool(m["enabled"].(bool)), InputFormatConfiguration: expandInputFormatConfiguration(m["input_format_configuration"].([]interface{})), OutputFormatConfiguration: expandOutputFormatConfiguration(m["output_format_configuration"].([]interface{})), @@ -1719,149 +1907,149 @@ func expandDataFormatConversionConfiguration(l []interface{}) *firehose.DataForm } } -func expandInputFormatConfiguration(l []interface{}) *firehose.InputFormatConfiguration { +func expandInputFormatConfiguration(l []interface{}) *types.InputFormatConfiguration { if len(l) == 0 || l[0] == nil { return nil } m := l[0].(map[string]interface{}) - return &firehose.InputFormatConfiguration{ + return &types.InputFormatConfiguration{ Deserializer: expandDeserializer(m["deserializer"].([]interface{})), } } -func expandDeserializer(l []interface{}) *firehose.Deserializer { +func expandDeserializer(l []interface{}) *types.Deserializer { if len(l) == 0 || l[0] == nil { return nil } m := l[0].(map[string]interface{}) - return &firehose.Deserializer{ + return &types.Deserializer{ HiveJsonSerDe: expandHiveJSONSerDe(m["hive_json_ser_de"].([]interface{})), OpenXJsonSerDe: expandOpenXJSONSerDe(m["open_x_json_ser_de"].([]interface{})), } } -func expandHiveJSONSerDe(l []interface{}) *firehose.HiveJsonSerDe { +func expandHiveJSONSerDe(l []interface{}) *types.HiveJsonSerDe { if len(l) == 0 { return nil } if l[0] == nil { - return &firehose.HiveJsonSerDe{} + return &types.HiveJsonSerDe{} } m := l[0].(map[string]interface{}) - return &firehose.HiveJsonSerDe{ - TimestampFormats: flex.ExpandStringList(m["timestamp_formats"].([]interface{})), + return &types.HiveJsonSerDe{ + TimestampFormats: flex.ExpandStringValueList(m["timestamp_formats"].([]interface{})), } } -func expandOpenXJSONSerDe(l []interface{}) *firehose.OpenXJsonSerDe { +func expandOpenXJSONSerDe(l []interface{}) *types.OpenXJsonSerDe { if len(l) == 0 { return nil } if l[0] == nil { - return &firehose.OpenXJsonSerDe{} + return &types.OpenXJsonSerDe{} } m := l[0].(map[string]interface{}) - return &firehose.OpenXJsonSerDe{ + return &types.OpenXJsonSerDe{ CaseInsensitive: aws.Bool(m["case_insensitive"].(bool)), - ColumnToJsonKeyMappings: flex.ExpandStringMap(m["column_to_json_key_mappings"].(map[string]interface{})), + ColumnToJsonKeyMappings: flex.ExpandStringValueMap(m["column_to_json_key_mappings"].(map[string]interface{})), ConvertDotsInJsonKeysToUnderscores: aws.Bool(m["convert_dots_in_json_keys_to_underscores"].(bool)), } } -func expandOutputFormatConfiguration(l []interface{}) *firehose.OutputFormatConfiguration { +func expandOutputFormatConfiguration(l []interface{}) *types.OutputFormatConfiguration { if len(l) == 0 || l[0] == nil { return nil } m := l[0].(map[string]interface{}) - return &firehose.OutputFormatConfiguration{ + return &types.OutputFormatConfiguration{ Serializer: expandSerializer(m["serializer"].([]interface{})), } } -func expandSerializer(l []interface{}) *firehose.Serializer { +func expandSerializer(l []interface{}) *types.Serializer { if len(l) == 0 || l[0] == nil { return nil } m := l[0].(map[string]interface{}) - return &firehose.Serializer{ + return &types.Serializer{ OrcSerDe: expandOrcSerDe(m["orc_ser_de"].([]interface{})), ParquetSerDe: 
expandParquetSerDe(m["parquet_ser_de"].([]interface{})), } } -func expandOrcSerDe(l []interface{}) *firehose.OrcSerDe { +func expandOrcSerDe(l []interface{}) *types.OrcSerDe { if len(l) == 0 { return nil } if l[0] == nil { - return &firehose.OrcSerDe{} + return &types.OrcSerDe{} } m := l[0].(map[string]interface{}) - orcSerDe := &firehose.OrcSerDe{ - BlockSizeBytes: aws.Int64(int64(m["block_size_bytes"].(int))), + orcSerDe := &types.OrcSerDe{ + BlockSizeBytes: aws.Int32(int32(m["block_size_bytes"].(int))), BloomFilterFalsePositiveProbability: aws.Float64(m["bloom_filter_false_positive_probability"].(float64)), - Compression: aws.String(m["compression"].(string)), + Compression: types.OrcCompression(m["compression"].(string)), DictionaryKeyThreshold: aws.Float64(m["dictionary_key_threshold"].(float64)), EnablePadding: aws.Bool(m["enable_padding"].(bool)), - FormatVersion: aws.String(m["format_version"].(string)), + FormatVersion: types.OrcFormatVersion(m["format_version"].(string)), PaddingTolerance: aws.Float64(m["padding_tolerance"].(float64)), - RowIndexStride: aws.Int64(int64(m["row_index_stride"].(int))), - StripeSizeBytes: aws.Int64(int64(m["stripe_size_bytes"].(int))), + RowIndexStride: aws.Int32(int32(m["row_index_stride"].(int))), + StripeSizeBytes: aws.Int32(int32(m["stripe_size_bytes"].(int))), } if v, ok := m["bloom_filter_columns"].([]interface{}); ok && len(v) > 0 { - orcSerDe.BloomFilterColumns = flex.ExpandStringList(v) + orcSerDe.BloomFilterColumns = flex.ExpandStringValueList(v) } return orcSerDe } -func expandParquetSerDe(l []interface{}) *firehose.ParquetSerDe { +func expandParquetSerDe(l []interface{}) *types.ParquetSerDe { if len(l) == 0 { return nil } if l[0] == nil { - return &firehose.ParquetSerDe{} + return &types.ParquetSerDe{} } m := l[0].(map[string]interface{}) - return &firehose.ParquetSerDe{ - BlockSizeBytes: aws.Int64(int64(m["block_size_bytes"].(int))), - Compression: aws.String(m["compression"].(string)), + return &types.ParquetSerDe{ + BlockSizeBytes: aws.Int32(int32(m["block_size_bytes"].(int))), + Compression: types.ParquetCompression(m["compression"].(string)), EnableDictionaryCompression: aws.Bool(m["enable_dictionary_compression"].(bool)), - MaxPaddingBytes: aws.Int64(int64(m["max_padding_bytes"].(int))), - PageSizeBytes: aws.Int64(int64(m["page_size_bytes"].(int))), - WriterVersion: aws.String(m["writer_version"].(string)), + MaxPaddingBytes: aws.Int32(int32(m["max_padding_bytes"].(int))), + PageSizeBytes: aws.Int32(int32(m["page_size_bytes"].(int))), + WriterVersion: types.ParquetWriterVersion(m["writer_version"].(string)), } } -func expandSchemaConfiguration(l []interface{}) *firehose.SchemaConfiguration { +func expandSchemaConfiguration(l []interface{}) *types.SchemaConfiguration { if len(l) == 0 || l[0] == nil { return nil } m := l[0].(map[string]interface{}) - config := &firehose.SchemaConfiguration{ + config := &types.SchemaConfiguration{ DatabaseName: aws.String(m["database_name"].(string)), RoleARN: aws.String(m["role_arn"].(string)), TableName: aws.String(m["table_name"].(string)), @@ -1878,72 +2066,81 @@ func expandSchemaConfiguration(l []interface{}) *firehose.SchemaConfiguration { return config } -func expandDynamicPartitioningConfiguration(s3 map[string]interface{}) *firehose.DynamicPartitioningConfiguration { +func expandDynamicPartitioningConfiguration(s3 map[string]interface{}) *types.DynamicPartitioningConfiguration { config := s3["dynamic_partitioning_configuration"].([]interface{}) if len(config) == 0 { return nil } 
dynamicPartitioningConfig := config[0].(map[string]interface{}) - DynamicPartitioningConfiguration := &firehose.DynamicPartitioningConfiguration{ + DynamicPartitioningConfiguration := &types.DynamicPartitioningConfiguration{ Enabled: aws.Bool(dynamicPartitioningConfig["enabled"].(bool)), } if retryDuration, ok := dynamicPartitioningConfig["retry_duration"]; ok { - DynamicPartitioningConfiguration.RetryOptions = &firehose.RetryOptions{ - DurationInSeconds: aws.Int64(int64(retryDuration.(int))), + DynamicPartitioningConfiguration.RetryOptions = &types.RetryOptions{ + DurationInSeconds: aws.Int32(int32(retryDuration.(int))), } } return DynamicPartitioningConfiguration } -func expandProcessingConfiguration(s3 map[string]interface{}) *firehose.ProcessingConfiguration { - config := s3["processing_configuration"].([]interface{}) +func expandProcessingConfiguration(tfMap map[string]interface{}, destinationType destinationType, roleARN string) *types.ProcessingConfiguration { + config := tfMap["processing_configuration"].([]interface{}) if len(config) == 0 || config[0] == nil { // It is possible to just pass nil here, but this seems to be the // canonical form that AWS uses, and is less likely to produce diffs. - return &firehose.ProcessingConfiguration{ + return &types.ProcessingConfiguration{ Enabled: aws.Bool(false), - Processors: []*firehose.Processor{}, + Processors: []types.Processor{}, } } processingConfiguration := config[0].(map[string]interface{}) - return &firehose.ProcessingConfiguration{ + return &types.ProcessingConfiguration{ Enabled: aws.Bool(processingConfiguration["enabled"].(bool)), - Processors: expandProcessors(processingConfiguration["processors"].([]interface{})), + Processors: expandProcessors(processingConfiguration["processors"].([]interface{}), destinationType, roleARN), } } -func expandProcessors(processingConfigurationProcessors []interface{}) []*firehose.Processor { - processors := []*firehose.Processor{} +func expandProcessors(processingConfigurationProcessors []interface{}, destinationType destinationType, roleARN string) []types.Processor { + processors := []types.Processor{} for _, processor := range processingConfigurationProcessors { extractedProcessor := expandProcessor(processor.(map[string]interface{})) if extractedProcessor != nil { - processors = append(processors, extractedProcessor) + // Merge in defaults. 
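+ // Assumption (see the sketch after this diff): defaultProcessorParameters
+ // yields the parameter values the Firehose API backfills for a processor
+ // type, so appending any the configuration omits keeps state aligned with
+ // the API response and avoids the perpetual `parameters` diffs noted in
+ // .changelog/35137.txt.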
+ for name, value := range defaultProcessorParameters(destinationType, extractedProcessor.Type, roleARN) { + if !slices.ContainsFunc(extractedProcessor.Parameters, func(param types.ProcessorParameter) bool { return name == param.ParameterName }) { + extractedProcessor.Parameters = append(extractedProcessor.Parameters, types.ProcessorParameter{ + ParameterName: name, + ParameterValue: aws.String(value), + }) + } + } + processors = append(processors, *extractedProcessor) } } return processors } -func expandProcessor(processingConfigurationProcessor map[string]interface{}) *firehose.Processor { - var processor *firehose.Processor +func expandProcessor(processingConfigurationProcessor map[string]interface{}) *types.Processor { + var processor *types.Processor processorType := processingConfigurationProcessor["type"].(string) if processorType != "" { - processor = &firehose.Processor{ - Type: aws.String(processorType), + processor = &types.Processor{ + Type: types.ProcessorType(processorType), Parameters: expandProcessorParameters(processingConfigurationProcessor["parameters"].([]interface{})), } } return processor } -func expandProcessorParameters(processorParameters []interface{}) []*firehose.ProcessorParameter { - parameters := []*firehose.ProcessorParameter{} +func expandProcessorParameters(processorParameters []interface{}) []types.ProcessorParameter { + parameters := []types.ProcessorParameter{} for _, attr := range processorParameters { parameters = append(parameters, expandProcessorParameter(attr.(map[string]interface{}))) @@ -1952,37 +2149,37 @@ func expandProcessorParameters(processorParameters []interface{}) []*firehose.Pr return parameters } -func expandProcessorParameter(processorParameter map[string]interface{}) *firehose.ProcessorParameter { - parameter := &firehose.ProcessorParameter{ - ParameterName: aws.String(processorParameter["parameter_name"].(string)), +func expandProcessorParameter(processorParameter map[string]interface{}) types.ProcessorParameter { + parameter := types.ProcessorParameter{ + ParameterName: types.ProcessorParameterName(processorParameter["parameter_name"].(string)), ParameterValue: aws.String(processorParameter["parameter_value"].(string)), } return parameter } -func expandEncryptionConfiguration(s3 map[string]interface{}) *firehose.EncryptionConfiguration { +func expandEncryptionConfiguration(s3 map[string]interface{}) *types.EncryptionConfiguration { if key, ok := s3["kms_key_arn"]; ok && len(key.(string)) > 0 { - return &firehose.EncryptionConfiguration{ - KMSEncryptionConfig: &firehose.KMSEncryptionConfig{ + return &types.EncryptionConfiguration{ + KMSEncryptionConfig: &types.KMSEncryptionConfig{ AWSKMSKeyARN: aws.String(key.(string)), }, } } - return &firehose.EncryptionConfiguration{ - NoEncryptionConfig: aws.String(firehose.NoEncryptionConfigNoEncryption), + return &types.EncryptionConfiguration{ + NoEncryptionConfig: types.NoEncryptionConfigNoEncryption, } } -func expandCloudWatchLoggingOptions(s3 map[string]interface{}) *firehose.CloudWatchLoggingOptions { +func expandCloudWatchLoggingOptions(s3 map[string]interface{}) *types.CloudWatchLoggingOptions { config := s3["cloudwatch_logging_options"].([]interface{}) if len(config) == 0 { return nil } loggingConfig := config[0].(map[string]interface{}) - loggingOptions := &firehose.CloudWatchLoggingOptions{ + loggingOptions := &types.CloudWatchLoggingOptions{ Enabled: aws.Bool(loggingConfig["enabled"].(bool)), } @@ -1997,7 +2194,7 @@ func expandCloudWatchLoggingOptions(s3 map[string]interface{}) 
*firehose.CloudWa return loggingOptions } -func expandVPCConfiguration(es map[string]interface{}) *firehose.VpcConfiguration { +func expandVPCConfiguration(es map[string]interface{}) *types.VpcConfiguration { config := es["vpc_config"].([]interface{}) if len(config) == 0 { return nil @@ -2005,10 +2202,10 @@ func expandVPCConfiguration(es map[string]interface{}) *firehose.VpcConfiguratio vpcConfig := config[0].(map[string]interface{}) - return &firehose.VpcConfiguration{ + return &types.VpcConfiguration{ RoleARN: aws.String(vpcConfig["role_arn"].(string)), - SubnetIds: flex.ExpandStringSet(vpcConfig["subnet_ids"].(*schema.Set)), - SecurityGroupIds: flex.ExpandStringSet(vpcConfig["security_group_ids"].(*schema.Set)), + SubnetIds: flex.ExpandStringValueSet(vpcConfig["subnet_ids"].(*schema.Set)), + SecurityGroupIds: flex.ExpandStringValueSet(vpcConfig["security_group_ids"].(*schema.Set)), } } @@ -2020,13 +2217,14 @@ func expandPrefix(s3 map[string]interface{}) *string { return nil } -func expandRedshiftDestinationConfiguration(redshift map[string]interface{}) *firehose.RedshiftDestinationConfiguration { - configuration := &firehose.RedshiftDestinationConfiguration{ +func expandRedshiftDestinationConfiguration(redshift map[string]interface{}) *types.RedshiftDestinationConfiguration { + roleARN := redshift["role_arn"].(string) + configuration := &types.RedshiftDestinationConfiguration{ ClusterJDBCURL: aws.String(redshift["cluster_jdbcurl"].(string)), RetryOptions: expandRedshiftRetryOptions(redshift), Password: aws.String(redshift["password"].(string)), Username: aws.String(redshift["username"].(string)), - RoleARN: aws.String(redshift["role_arn"].(string)), + RoleARN: aws.String(roleARN), CopyCommand: expandCopyCommand(redshift), S3Configuration: expandS3DestinationConfiguration(redshift["s3_configuration"].([]interface{})), } @@ -2035,23 +2233,24 @@ func expandRedshiftDestinationConfiguration(redshift map[string]interface{}) *fi configuration.CloudWatchLoggingOptions = expandCloudWatchLoggingOptions(redshift) } if _, ok := redshift["processing_configuration"]; ok { - configuration.ProcessingConfiguration = expandProcessingConfiguration(redshift) + configuration.ProcessingConfiguration = expandProcessingConfiguration(redshift, destinationTypeRedshift, roleARN) } if s3BackupMode, ok := redshift["s3_backup_mode"]; ok { - configuration.S3BackupMode = aws.String(s3BackupMode.(string)) + configuration.S3BackupMode = types.RedshiftS3BackupMode(s3BackupMode.(string)) configuration.S3BackupConfiguration = expandS3DestinationConfigurationBackup(redshift) } return configuration } -func expandRedshiftDestinationUpdate(redshift map[string]interface{}) *firehose.RedshiftDestinationUpdate { - configuration := &firehose.RedshiftDestinationUpdate{ +func expandRedshiftDestinationUpdate(redshift map[string]interface{}) *types.RedshiftDestinationUpdate { + roleARN := redshift["role_arn"].(string) + configuration := &types.RedshiftDestinationUpdate{ ClusterJDBCURL: aws.String(redshift["cluster_jdbcurl"].(string)), RetryOptions: expandRedshiftRetryOptions(redshift), Password: aws.String(redshift["password"].(string)), Username: aws.String(redshift["username"].(string)), - RoleARN: aws.String(redshift["role_arn"].(string)), + RoleARN: aws.String(roleARN), CopyCommand: expandCopyCommand(redshift), } @@ -2066,10 +2265,10 @@ func expandRedshiftDestinationUpdate(redshift map[string]interface{}) *firehose. 
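+	// (The update path mirrors expandRedshiftDestinationConfiguration: the hoisted
+	// roleARN is threaded through to expandProcessingConfiguration below.)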
configuration.CloudWatchLoggingOptions = expandCloudWatchLoggingOptions(redshift) } if _, ok := redshift["processing_configuration"]; ok { - configuration.ProcessingConfiguration = expandProcessingConfiguration(redshift) + configuration.ProcessingConfiguration = expandProcessingConfiguration(redshift, destinationTypeRedshift, roleARN) } if s3BackupMode, ok := redshift["s3_backup_mode"]; ok { - configuration.S3BackupMode = aws.String(s3BackupMode.(string)) + configuration.S3BackupMode = types.RedshiftS3BackupMode(s3BackupMode.(string)) configuration.S3BackupUpdate = expandS3DestinationUpdateBackup(redshift) if configuration.S3BackupUpdate != nil { // Redshift does not currently support ErrorOutputPrefix, @@ -2082,12 +2281,13 @@ func expandRedshiftDestinationUpdate(redshift map[string]interface{}) *firehose. return configuration } -func expandElasticsearchDestinationConfiguration(es map[string]interface{}) *firehose.ElasticsearchDestinationConfiguration { - config := &firehose.ElasticsearchDestinationConfiguration{ +func expandElasticsearchDestinationConfiguration(es map[string]interface{}) *types.ElasticsearchDestinationConfiguration { + roleARN := es["role_arn"].(string) + config := &types.ElasticsearchDestinationConfiguration{ BufferingHints: expandElasticsearchBufferingHints(es), IndexName: aws.String(es["index_name"].(string)), RetryOptions: expandElasticsearchRetryOptions(es), - RoleARN: aws.String(es["role_arn"].(string)), + RoleARN: aws.String(roleARN), TypeName: aws.String(es["type_name"].(string)), S3Configuration: expandS3DestinationConfiguration(es["s3_configuration"].([]interface{})), } @@ -2105,14 +2305,14 @@ func expandElasticsearchDestinationConfiguration(es map[string]interface{}) *fir } if _, ok := es["processing_configuration"]; ok { - config.ProcessingConfiguration = expandProcessingConfiguration(es) + config.ProcessingConfiguration = expandProcessingConfiguration(es, destinationTypeElasticsearch, roleARN) } if indexRotationPeriod, ok := es["index_rotation_period"]; ok { - config.IndexRotationPeriod = aws.String(indexRotationPeriod.(string)) + config.IndexRotationPeriod = types.ElasticsearchIndexRotationPeriod(indexRotationPeriod.(string)) } if s3BackupMode, ok := es["s3_backup_mode"]; ok { - config.S3BackupMode = aws.String(s3BackupMode.(string)) + config.S3BackupMode = types.ElasticsearchS3BackupMode(s3BackupMode.(string)) } if _, ok := es["vpc_config"]; ok { @@ -2122,12 +2322,13 @@ func expandElasticsearchDestinationConfiguration(es map[string]interface{}) *fir return config } -func expandElasticsearchDestinationUpdate(es map[string]interface{}) *firehose.ElasticsearchDestinationUpdate { - update := &firehose.ElasticsearchDestinationUpdate{ +func expandElasticsearchDestinationUpdate(es map[string]interface{}) *types.ElasticsearchDestinationUpdate { + roleARN := es["role_arn"].(string) + update := &types.ElasticsearchDestinationUpdate{ BufferingHints: expandElasticsearchBufferingHints(es), IndexName: aws.String(es["index_name"].(string)), RetryOptions: expandElasticsearchRetryOptions(es), - RoleARN: aws.String(es["role_arn"].(string)), + RoleARN: aws.String(roleARN), TypeName: aws.String(es["type_name"].(string)), S3Update: expandS3DestinationUpdate(es["s3_configuration"].([]interface{})), } @@ -2145,294 +2346,328 @@ func expandElasticsearchDestinationUpdate(es map[string]interface{}) *firehose.E } if _, ok := es["processing_configuration"]; ok { - update.ProcessingConfiguration = expandProcessingConfiguration(es) + update.ProcessingConfiguration = 
expandProcessingConfiguration(es, destinationTypeElasticsearch, roleARN) } if indexRotationPeriod, ok := es["index_rotation_period"]; ok { - update.IndexRotationPeriod = aws.String(indexRotationPeriod.(string)) + update.IndexRotationPeriod = types.ElasticsearchIndexRotationPeriod(indexRotationPeriod.(string)) } return update } -func expandAmazonopensearchserviceDestinationConfiguration(es map[string]interface{}) *firehose.AmazonopensearchserviceDestinationConfiguration { - config := &firehose.AmazonopensearchserviceDestinationConfiguration{ - BufferingHints: expandAmazonopensearchserviceBufferingHints(es), - IndexName: aws.String(es["index_name"].(string)), - RetryOptions: expandAmazonopensearchserviceRetryOptions(es), - RoleARN: aws.String(es["role_arn"].(string)), - TypeName: aws.String(es["type_name"].(string)), - S3Configuration: expandS3DestinationConfiguration(es["s3_configuration"].([]interface{})), +func expandAmazonopensearchserviceDestinationConfiguration(os map[string]interface{}) *types.AmazonopensearchserviceDestinationConfiguration { + roleARN := os["role_arn"].(string) + config := &types.AmazonopensearchserviceDestinationConfiguration{ + BufferingHints: expandAmazonopensearchserviceBufferingHints(os), + IndexName: aws.String(os["index_name"].(string)), + RetryOptions: expandAmazonopensearchserviceRetryOptions(os), + RoleARN: aws.String(roleARN), + TypeName: aws.String(os["type_name"].(string)), + S3Configuration: expandS3DestinationConfiguration(os["s3_configuration"].([]interface{})), } - if v, ok := es["domain_arn"]; ok && v.(string) != "" { + if v, ok := os["domain_arn"]; ok && v.(string) != "" { config.DomainARN = aws.String(v.(string)) } - if v, ok := es["cluster_endpoint"]; ok && v.(string) != "" { + if v, ok := os["cluster_endpoint"]; ok && v.(string) != "" { config.ClusterEndpoint = aws.String(v.(string)) } - if _, ok := es["cloudwatch_logging_options"]; ok { - config.CloudWatchLoggingOptions = expandCloudWatchLoggingOptions(es) + if _, ok := os["cloudwatch_logging_options"]; ok { + config.CloudWatchLoggingOptions = expandCloudWatchLoggingOptions(os) } - if _, ok := es["processing_configuration"]; ok { - config.ProcessingConfiguration = expandProcessingConfiguration(es) + if _, ok := os["processing_configuration"]; ok { + config.ProcessingConfiguration = expandProcessingConfiguration(os, destinationTypeOpenSearch, roleARN) } - if indexRotationPeriod, ok := es["index_rotation_period"]; ok { - config.IndexRotationPeriod = aws.String(indexRotationPeriod.(string)) + if indexRotationPeriod, ok := os["index_rotation_period"]; ok { + config.IndexRotationPeriod = types.AmazonopensearchserviceIndexRotationPeriod(indexRotationPeriod.(string)) } - if s3BackupMode, ok := es["s3_backup_mode"]; ok { - config.S3BackupMode = aws.String(s3BackupMode.(string)) + if s3BackupMode, ok := os["s3_backup_mode"]; ok { + config.S3BackupMode = types.AmazonopensearchserviceS3BackupMode(s3BackupMode.(string)) } - if _, ok := es["vpc_config"]; ok { - config.VpcConfiguration = expandVPCConfiguration(es) + if _, ok := os["vpc_config"]; ok { + config.VpcConfiguration = expandVPCConfiguration(os) + } + + if v, ok := os["document_id_options"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + config.DocumentIdOptions = expandDocumentIDOptions(v[0].(map[string]interface{})) } return config } -func expandAmazonopensearchserviceDestinationUpdate(es map[string]interface{}) *firehose.AmazonopensearchserviceDestinationUpdate { - update := &firehose.AmazonopensearchserviceDestinationUpdate{ - 
BufferingHints: expandAmazonopensearchserviceBufferingHints(es), - IndexName: aws.String(es["index_name"].(string)), - RetryOptions: expandAmazonopensearchserviceRetryOptions(es), - RoleARN: aws.String(es["role_arn"].(string)), - TypeName: aws.String(es["type_name"].(string)), - S3Update: expandS3DestinationUpdate(es["s3_configuration"].([]interface{})), +func expandAmazonopensearchserviceDestinationUpdate(os map[string]interface{}) *types.AmazonopensearchserviceDestinationUpdate { + roleARN := os["role_arn"].(string) + update := &types.AmazonopensearchserviceDestinationUpdate{ + BufferingHints: expandAmazonopensearchserviceBufferingHints(os), + IndexName: aws.String(os["index_name"].(string)), + RetryOptions: expandAmazonopensearchserviceRetryOptions(os), + RoleARN: aws.String(roleARN), + TypeName: aws.String(os["type_name"].(string)), + S3Update: expandS3DestinationUpdate(os["s3_configuration"].([]interface{})), } - if v, ok := es["domain_arn"]; ok && v.(string) != "" { + if v, ok := os["domain_arn"]; ok && v.(string) != "" { update.DomainARN = aws.String(v.(string)) } - if v, ok := es["cluster_endpoint"]; ok && v.(string) != "" { + if v, ok := os["cluster_endpoint"]; ok && v.(string) != "" { update.ClusterEndpoint = aws.String(v.(string)) } - if _, ok := es["cloudwatch_logging_options"]; ok { - update.CloudWatchLoggingOptions = expandCloudWatchLoggingOptions(es) + if _, ok := os["cloudwatch_logging_options"]; ok { + update.CloudWatchLoggingOptions = expandCloudWatchLoggingOptions(os) } - if _, ok := es["processing_configuration"]; ok { - update.ProcessingConfiguration = expandProcessingConfiguration(es) + if _, ok := os["processing_configuration"]; ok { + update.ProcessingConfiguration = expandProcessingConfiguration(os, destinationTypeOpenSearch, roleARN) } - if indexRotationPeriod, ok := es["index_rotation_period"]; ok { - update.IndexRotationPeriod = aws.String(indexRotationPeriod.(string)) + if indexRotationPeriod, ok := os["index_rotation_period"]; ok { + update.IndexRotationPeriod = types.AmazonopensearchserviceIndexRotationPeriod(indexRotationPeriod.(string)) + } + + if v, ok := os["document_id_options"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + update.DocumentIdOptions = expandDocumentIDOptions(v[0].(map[string]interface{})) } return update } -func expandAmazonOpenSearchServerlessDestinationConfiguration(es map[string]interface{}) *firehose.AmazonOpenSearchServerlessDestinationConfiguration { - config := &firehose.AmazonOpenSearchServerlessDestinationConfiguration{ - BufferingHints: expandAmazonOpenSearchServerlessBufferingHints(es), - IndexName: aws.String(es["index_name"].(string)), - RetryOptions: expandAmazonOpenSearchServerlessRetryOptions(es), - RoleARN: aws.String(es["role_arn"].(string)), - S3Configuration: expandS3DestinationConfiguration(es["s3_configuration"].([]interface{})), +func expandAmazonOpenSearchServerlessDestinationConfiguration(oss map[string]interface{}) *types.AmazonOpenSearchServerlessDestinationConfiguration { + roleARN := oss["role_arn"].(string) + config := &types.AmazonOpenSearchServerlessDestinationConfiguration{ + BufferingHints: expandAmazonOpenSearchServerlessBufferingHints(oss), + IndexName: aws.String(oss["index_name"].(string)), + RetryOptions: expandAmazonOpenSearchServerlessRetryOptions(oss), + RoleARN: aws.String(roleARN), + S3Configuration: expandS3DestinationConfiguration(oss["s3_configuration"].([]interface{})), } - if v, ok := es["collection_endpoint"]; ok && v.(string) != "" { + if v, ok := oss["collection_endpoint"]; ok && 
v.(string) != "" { config.CollectionEndpoint = aws.String(v.(string)) } - if _, ok := es["cloudwatch_logging_options"]; ok { - config.CloudWatchLoggingOptions = expandCloudWatchLoggingOptions(es) + if _, ok := oss["cloudwatch_logging_options"]; ok { + config.CloudWatchLoggingOptions = expandCloudWatchLoggingOptions(oss) } - if _, ok := es["processing_configuration"]; ok { - config.ProcessingConfiguration = expandProcessingConfiguration(es) + if _, ok := oss["processing_configuration"]; ok { + config.ProcessingConfiguration = expandProcessingConfiguration(oss, destinationTypeOpenSearchServerless, roleARN) } - if s3BackupMode, ok := es["s3_backup_mode"]; ok { - config.S3BackupMode = aws.String(s3BackupMode.(string)) + if s3BackupMode, ok := oss["s3_backup_mode"]; ok { + config.S3BackupMode = types.AmazonOpenSearchServerlessS3BackupMode(s3BackupMode.(string)) } - if _, ok := es["vpc_config"]; ok { - config.VpcConfiguration = expandVPCConfiguration(es) + if _, ok := oss["vpc_config"]; ok { + config.VpcConfiguration = expandVPCConfiguration(oss) } return config } -func expandAmazonOpenSearchServerlessDestinationUpdate(es map[string]interface{}) *firehose.AmazonOpenSearchServerlessDestinationUpdate { - update := &firehose.AmazonOpenSearchServerlessDestinationUpdate{ - BufferingHints: expandAmazonOpenSearchServerlessBufferingHints(es), - IndexName: aws.String(es["index_name"].(string)), - RetryOptions: expandAmazonOpenSearchServerlessRetryOptions(es), - RoleARN: aws.String(es["role_arn"].(string)), - S3Update: expandS3DestinationUpdate(es["s3_configuration"].([]interface{})), +func expandAmazonOpenSearchServerlessDestinationUpdate(oss map[string]interface{}) *types.AmazonOpenSearchServerlessDestinationUpdate { + roleARN := oss["role_arn"].(string) + update := &types.AmazonOpenSearchServerlessDestinationUpdate{ + BufferingHints: expandAmazonOpenSearchServerlessBufferingHints(oss), + IndexName: aws.String(oss["index_name"].(string)), + RetryOptions: expandAmazonOpenSearchServerlessRetryOptions(oss), + RoleARN: aws.String(roleARN), + S3Update: expandS3DestinationUpdate(oss["s3_configuration"].([]interface{})), } - if v, ok := es["collection_endpoint"]; ok && v.(string) != "" { + if v, ok := oss["collection_endpoint"]; ok && v.(string) != "" { update.CollectionEndpoint = aws.String(v.(string)) } - if _, ok := es["cloudwatch_logging_options"]; ok { - update.CloudWatchLoggingOptions = expandCloudWatchLoggingOptions(es) + if _, ok := oss["cloudwatch_logging_options"]; ok { + update.CloudWatchLoggingOptions = expandCloudWatchLoggingOptions(oss) } - if _, ok := es["processing_configuration"]; ok { - update.ProcessingConfiguration = expandProcessingConfiguration(es) + if _, ok := oss["processing_configuration"]; ok { + update.ProcessingConfiguration = expandProcessingConfiguration(oss, destinationTypeOpenSearchServerless, roleARN) } return update } -func expandSplunkDestinationConfiguration(splunk map[string]interface{}) *firehose.SplunkDestinationConfiguration { - configuration := &firehose.SplunkDestinationConfiguration{ +func expandSplunkDestinationConfiguration(splunk map[string]interface{}) *types.SplunkDestinationConfiguration { + configuration := &types.SplunkDestinationConfiguration{ HECToken: aws.String(splunk["hec_token"].(string)), - HECEndpointType: aws.String(splunk["hec_endpoint_type"].(string)), + HECEndpointType: types.HECEndpointType(splunk["hec_endpoint_type"].(string)), HECEndpoint: aws.String(splunk["hec_endpoint"].(string)), - HECAcknowledgmentTimeoutInSeconds: 
aws.Int64(int64(splunk["hec_acknowledgment_timeout"].(int))), + HECAcknowledgmentTimeoutInSeconds: aws.Int32(int32(splunk["hec_acknowledgment_timeout"].(int))), RetryOptions: expandSplunkRetryOptions(splunk), S3Configuration: expandS3DestinationConfiguration(splunk["s3_configuration"].([]interface{})), } + bufferingHints := &types.SplunkBufferingHints{} + + if bufferingInterval, ok := splunk["buffering_interval"].(int); ok { + bufferingHints.IntervalInSeconds = aws.Int32(int32(bufferingInterval)) + } + if bufferingSize, ok := splunk["buffering_size"].(int); ok { + bufferingHints.SizeInMBs = aws.Int32(int32(bufferingSize)) + } + configuration.BufferingHints = bufferingHints + if _, ok := splunk["processing_configuration"]; ok { - configuration.ProcessingConfiguration = expandProcessingConfiguration(splunk) + configuration.ProcessingConfiguration = expandProcessingConfiguration(splunk, destinationTypeSplunk, "") } if _, ok := splunk["cloudwatch_logging_options"]; ok { configuration.CloudWatchLoggingOptions = expandCloudWatchLoggingOptions(splunk) } if s3BackupMode, ok := splunk["s3_backup_mode"]; ok { - configuration.S3BackupMode = aws.String(s3BackupMode.(string)) + configuration.S3BackupMode = types.SplunkS3BackupMode(s3BackupMode.(string)) } return configuration } -func expandSplunkDestinationUpdate(splunk map[string]interface{}) *firehose.SplunkDestinationUpdate { - configuration := &firehose.SplunkDestinationUpdate{ +func expandSplunkDestinationUpdate(splunk map[string]interface{}) *types.SplunkDestinationUpdate { + configuration := &types.SplunkDestinationUpdate{ HECToken: aws.String(splunk["hec_token"].(string)), - HECEndpointType: aws.String(splunk["hec_endpoint_type"].(string)), + HECEndpointType: types.HECEndpointType(splunk["hec_endpoint_type"].(string)), HECEndpoint: aws.String(splunk["hec_endpoint"].(string)), - HECAcknowledgmentTimeoutInSeconds: aws.Int64(int64(splunk["hec_acknowledgment_timeout"].(int))), + HECAcknowledgmentTimeoutInSeconds: aws.Int32(int32(splunk["hec_acknowledgment_timeout"].(int))), RetryOptions: expandSplunkRetryOptions(splunk), S3Update: expandS3DestinationUpdate(splunk["s3_configuration"].([]interface{})), } + bufferingHints := &types.SplunkBufferingHints{} + + if bufferingInterval, ok := splunk["buffering_interval"].(int); ok { + bufferingHints.IntervalInSeconds = aws.Int32(int32(bufferingInterval)) + } + if bufferingSize, ok := splunk["buffering_size"].(int); ok { + bufferingHints.SizeInMBs = aws.Int32(int32(bufferingSize)) + } + configuration.BufferingHints = bufferingHints + if _, ok := splunk["processing_configuration"]; ok { - configuration.ProcessingConfiguration = expandProcessingConfiguration(splunk) + configuration.ProcessingConfiguration = expandProcessingConfiguration(splunk, destinationTypeSplunk, "") } if _, ok := splunk["cloudwatch_logging_options"]; ok { configuration.CloudWatchLoggingOptions = expandCloudWatchLoggingOptions(splunk) } if s3BackupMode, ok := splunk["s3_backup_mode"]; ok { - configuration.S3BackupMode = aws.String(s3BackupMode.(string)) + configuration.S3BackupMode = types.SplunkS3BackupMode(s3BackupMode.(string)) } return configuration } -func expandHTTPEndpointDestinationConfiguration(HttpEndpoint map[string]interface{}) *firehose.HttpEndpointDestinationConfiguration { - configuration := &firehose.HttpEndpointDestinationConfiguration{ - RetryOptions: expandHTTPEndpointRetryOptions(HttpEndpoint), - RoleARN: aws.String(HttpEndpoint["role_arn"].(string)), - S3Configuration: 
expandS3DestinationConfiguration(HttpEndpoint["s3_configuration"].([]interface{})), +func expandHTTPEndpointDestinationConfiguration(httpEndpoint map[string]interface{}) *types.HttpEndpointDestinationConfiguration { + roleARN := httpEndpoint["role_arn"].(string) + configuration := &types.HttpEndpointDestinationConfiguration{ + RetryOptions: expandHTTPEndpointRetryOptions(httpEndpoint), + RoleARN: aws.String(roleARN), + S3Configuration: expandS3DestinationConfiguration(httpEndpoint["s3_configuration"].([]interface{})), } - configuration.EndpointConfiguration = expandHTTPEndpointConfiguration(HttpEndpoint) + configuration.EndpointConfiguration = expandHTTPEndpointConfiguration(httpEndpoint) - bufferingHints := &firehose.HttpEndpointBufferingHints{} + bufferingHints := &types.HttpEndpointBufferingHints{} - if bufferingInterval, ok := HttpEndpoint["buffering_interval"].(int); ok { - bufferingHints.IntervalInSeconds = aws.Int64(int64(bufferingInterval)) + if bufferingInterval, ok := httpEndpoint["buffering_interval"].(int); ok { + bufferingHints.IntervalInSeconds = aws.Int32(int32(bufferingInterval)) } - if bufferingSize, ok := HttpEndpoint["buffering_size"].(int); ok { - bufferingHints.SizeInMBs = aws.Int64(int64(bufferingSize)) + if bufferingSize, ok := httpEndpoint["buffering_size"].(int); ok { + bufferingHints.SizeInMBs = aws.Int32(int32(bufferingSize)) } configuration.BufferingHints = bufferingHints - if _, ok := HttpEndpoint["processing_configuration"]; ok { - configuration.ProcessingConfiguration = expandProcessingConfiguration(HttpEndpoint) + if _, ok := httpEndpoint["processing_configuration"]; ok { + configuration.ProcessingConfiguration = expandProcessingConfiguration(httpEndpoint, destinationTypeHTTPEndpoint, roleARN) } - if _, ok := HttpEndpoint["request_configuration"]; ok { - configuration.RequestConfiguration = expandHTTPEndpointRequestConfiguration(HttpEndpoint) + if _, ok := httpEndpoint["request_configuration"]; ok { + configuration.RequestConfiguration = expandHTTPEndpointRequestConfiguration(httpEndpoint) } - if _, ok := HttpEndpoint["cloudwatch_logging_options"]; ok { - configuration.CloudWatchLoggingOptions = expandCloudWatchLoggingOptions(HttpEndpoint) + if _, ok := httpEndpoint["cloudwatch_logging_options"]; ok { + configuration.CloudWatchLoggingOptions = expandCloudWatchLoggingOptions(httpEndpoint) } - if s3BackupMode, ok := HttpEndpoint["s3_backup_mode"]; ok { - configuration.S3BackupMode = aws.String(s3BackupMode.(string)) + if s3BackupMode, ok := httpEndpoint["s3_backup_mode"]; ok { + configuration.S3BackupMode = types.HttpEndpointS3BackupMode(s3BackupMode.(string)) } return configuration } -func expandHTTPEndpointDestinationUpdate(HttpEndpoint map[string]interface{}) *firehose.HttpEndpointDestinationUpdate { - configuration := &firehose.HttpEndpointDestinationUpdate{ - RetryOptions: expandHTTPEndpointRetryOptions(HttpEndpoint), - RoleARN: aws.String(HttpEndpoint["role_arn"].(string)), - S3Update: expandS3DestinationUpdate(HttpEndpoint["s3_configuration"].([]interface{})), +func expandHTTPEndpointDestinationUpdate(httpEndpoint map[string]interface{}) *types.HttpEndpointDestinationUpdate { + roleARN := httpEndpoint["role_arn"].(string) + configuration := &types.HttpEndpointDestinationUpdate{ + RetryOptions: expandHTTPEndpointRetryOptions(httpEndpoint), + RoleARN: aws.String(roleARN), + S3Update: expandS3DestinationUpdate(httpEndpoint["s3_configuration"].([]interface{})), } - configuration.EndpointConfiguration = expandHTTPEndpointConfiguration(HttpEndpoint) + 
configuration.EndpointConfiguration = expandHTTPEndpointConfiguration(httpEndpoint) - bufferingHints := &firehose.HttpEndpointBufferingHints{} + bufferingHints := &types.HttpEndpointBufferingHints{} - if bufferingInterval, ok := HttpEndpoint["buffering_interval"].(int); ok { - bufferingHints.IntervalInSeconds = aws.Int64(int64(bufferingInterval)) + if bufferingInterval, ok := httpEndpoint["buffering_interval"].(int); ok { + bufferingHints.IntervalInSeconds = aws.Int32(int32(bufferingInterval)) } - if bufferingSize, ok := HttpEndpoint["buffering_size"].(int); ok { - bufferingHints.SizeInMBs = aws.Int64(int64(bufferingSize)) + if bufferingSize, ok := httpEndpoint["buffering_size"].(int); ok { + bufferingHints.SizeInMBs = aws.Int32(int32(bufferingSize)) } configuration.BufferingHints = bufferingHints - if _, ok := HttpEndpoint["processing_configuration"]; ok { - configuration.ProcessingConfiguration = expandProcessingConfiguration(HttpEndpoint) + if _, ok := httpEndpoint["processing_configuration"]; ok { + configuration.ProcessingConfiguration = expandProcessingConfiguration(httpEndpoint, destinationTypeHTTPEndpoint, roleARN) } - if _, ok := HttpEndpoint["request_configuration"]; ok { - configuration.RequestConfiguration = expandHTTPEndpointRequestConfiguration(HttpEndpoint) + if _, ok := httpEndpoint["request_configuration"]; ok { + configuration.RequestConfiguration = expandHTTPEndpointRequestConfiguration(httpEndpoint) } - if _, ok := HttpEndpoint["cloudwatch_logging_options"]; ok { - configuration.CloudWatchLoggingOptions = expandCloudWatchLoggingOptions(HttpEndpoint) + if _, ok := httpEndpoint["cloudwatch_logging_options"]; ok { + configuration.CloudWatchLoggingOptions = expandCloudWatchLoggingOptions(httpEndpoint) } - if s3BackupMode, ok := HttpEndpoint["s3_backup_mode"]; ok { - configuration.S3BackupMode = aws.String(s3BackupMode.(string)) + if s3BackupMode, ok := httpEndpoint["s3_backup_mode"]; ok { + configuration.S3BackupMode = types.HttpEndpointS3BackupMode(s3BackupMode.(string)) } return configuration } -func expandHTTPEndpointCommonAttributes(ca []interface{}) []*firehose.HttpEndpointCommonAttribute { - CommonAttributes := make([]*firehose.HttpEndpointCommonAttribute, 0, len(ca)) +func expandHTTPEndpointCommonAttributes(ca []interface{}) []types.HttpEndpointCommonAttribute { + commonAttributes := make([]types.HttpEndpointCommonAttribute, 0, len(ca)) for _, raw := range ca { data := raw.(map[string]interface{}) - a := &firehose.HttpEndpointCommonAttribute{ + a := types.HttpEndpointCommonAttribute{ AttributeName: aws.String(data["name"].(string)), AttributeValue: aws.String(data["value"].(string)), } - CommonAttributes = append(CommonAttributes, a) + commonAttributes = append(commonAttributes, a) } - return CommonAttributes + return commonAttributes } -func expandHTTPEndpointRequestConfiguration(rc map[string]interface{}) *firehose.HttpEndpointRequestConfiguration { +func expandHTTPEndpointRequestConfiguration(rc map[string]interface{}) *types.HttpEndpointRequestConfiguration { config := rc["request_configuration"].([]interface{}) if len(config) == 0 { return nil } requestConfig := config[0].(map[string]interface{}) - RequestConfiguration := &firehose.HttpEndpointRequestConfiguration{} + RequestConfiguration := &types.HttpEndpointRequestConfiguration{} if contentEncoding, ok := requestConfig["content_encoding"]; ok { - RequestConfiguration.ContentEncoding = aws.String(contentEncoding.(string)) + RequestConfiguration.ContentEncoding = types.ContentEncoding(contentEncoding.(string)) 
} if commonAttributes, ok := requestConfig["common_attributes"]; ok { @@ -2442,8 +2677,8 @@ func expandHTTPEndpointRequestConfiguration(rc map[string]interface{}) *firehose return RequestConfiguration } -func expandHTTPEndpointConfiguration(ep map[string]interface{}) *firehose.HttpEndpointConfiguration { - endpointConfiguration := &firehose.HttpEndpointConfiguration{ +func expandHTTPEndpointConfiguration(ep map[string]interface{}) *types.HttpEndpointConfiguration { + endpointConfiguration := &types.HttpEndpointConfiguration{ Url: aws.String(ep["url"].(string)), } @@ -2458,107 +2693,107 @@ func expandHTTPEndpointConfiguration(ep map[string]interface{}) *firehose.HttpEn return endpointConfiguration } -func expandElasticsearchBufferingHints(es map[string]interface{}) *firehose.ElasticsearchBufferingHints { - bufferingHints := &firehose.ElasticsearchBufferingHints{} +func expandElasticsearchBufferingHints(es map[string]interface{}) *types.ElasticsearchBufferingHints { + bufferingHints := &types.ElasticsearchBufferingHints{} if bufferingInterval, ok := es["buffering_interval"].(int); ok { - bufferingHints.IntervalInSeconds = aws.Int64(int64(bufferingInterval)) + bufferingHints.IntervalInSeconds = aws.Int32(int32(bufferingInterval)) } if bufferingSize, ok := es["buffering_size"].(int); ok { - bufferingHints.SizeInMBs = aws.Int64(int64(bufferingSize)) + bufferingHints.SizeInMBs = aws.Int32(int32(bufferingSize)) } return bufferingHints } -func expandAmazonopensearchserviceBufferingHints(es map[string]interface{}) *firehose.AmazonopensearchserviceBufferingHints { - bufferingHints := &firehose.AmazonopensearchserviceBufferingHints{} +func expandAmazonopensearchserviceBufferingHints(es map[string]interface{}) *types.AmazonopensearchserviceBufferingHints { + bufferingHints := &types.AmazonopensearchserviceBufferingHints{} if bufferingInterval, ok := es["buffering_interval"].(int); ok { - bufferingHints.IntervalInSeconds = aws.Int64(int64(bufferingInterval)) + bufferingHints.IntervalInSeconds = aws.Int32(int32(bufferingInterval)) } if bufferingSize, ok := es["buffering_size"].(int); ok { - bufferingHints.SizeInMBs = aws.Int64(int64(bufferingSize)) + bufferingHints.SizeInMBs = aws.Int32(int32(bufferingSize)) } return bufferingHints } -func expandAmazonOpenSearchServerlessBufferingHints(es map[string]interface{}) *firehose.AmazonOpenSearchServerlessBufferingHints { - bufferingHints := &firehose.AmazonOpenSearchServerlessBufferingHints{} +func expandAmazonOpenSearchServerlessBufferingHints(es map[string]interface{}) *types.AmazonOpenSearchServerlessBufferingHints { + bufferingHints := &types.AmazonOpenSearchServerlessBufferingHints{} if bufferingInterval, ok := es["buffering_interval"].(int); ok { - bufferingHints.IntervalInSeconds = aws.Int64(int64(bufferingInterval)) + bufferingHints.IntervalInSeconds = aws.Int32(int32(bufferingInterval)) } if bufferingSize, ok := es["buffering_size"].(int); ok { - bufferingHints.SizeInMBs = aws.Int64(int64(bufferingSize)) + bufferingHints.SizeInMBs = aws.Int32(int32(bufferingSize)) } return bufferingHints } -func expandElasticsearchRetryOptions(es map[string]interface{}) *firehose.ElasticsearchRetryOptions { - retryOptions := &firehose.ElasticsearchRetryOptions{} +func expandElasticsearchRetryOptions(es map[string]interface{}) *types.ElasticsearchRetryOptions { + retryOptions := &types.ElasticsearchRetryOptions{} if retryDuration, ok := es["retry_duration"].(int); ok { - retryOptions.DurationInSeconds = aws.Int64(int64(retryDuration)) + retryOptions.DurationInSeconds = 
aws.Int32(int32(retryDuration)) } return retryOptions } -func expandAmazonopensearchserviceRetryOptions(es map[string]interface{}) *firehose.AmazonopensearchserviceRetryOptions { - retryOptions := &firehose.AmazonopensearchserviceRetryOptions{} +func expandAmazonopensearchserviceRetryOptions(es map[string]interface{}) *types.AmazonopensearchserviceRetryOptions { + retryOptions := &types.AmazonopensearchserviceRetryOptions{} if retryDuration, ok := es["retry_duration"].(int); ok { - retryOptions.DurationInSeconds = aws.Int64(int64(retryDuration)) + retryOptions.DurationInSeconds = aws.Int32(int32(retryDuration)) } return retryOptions } -func expandAmazonOpenSearchServerlessRetryOptions(es map[string]interface{}) *firehose.AmazonOpenSearchServerlessRetryOptions { - retryOptions := &firehose.AmazonOpenSearchServerlessRetryOptions{} +func expandAmazonOpenSearchServerlessRetryOptions(es map[string]interface{}) *types.AmazonOpenSearchServerlessRetryOptions { + retryOptions := &types.AmazonOpenSearchServerlessRetryOptions{} if retryDuration, ok := es["retry_duration"].(int); ok { - retryOptions.DurationInSeconds = aws.Int64(int64(retryDuration)) + retryOptions.DurationInSeconds = aws.Int32(int32(retryDuration)) } return retryOptions } -func expandHTTPEndpointRetryOptions(tfMap map[string]interface{}) *firehose.HttpEndpointRetryOptions { - retryOptions := &firehose.HttpEndpointRetryOptions{} +func expandHTTPEndpointRetryOptions(tfMap map[string]interface{}) *types.HttpEndpointRetryOptions { + retryOptions := &types.HttpEndpointRetryOptions{} if retryDuration, ok := tfMap["retry_duration"].(int); ok { - retryOptions.DurationInSeconds = aws.Int64(int64(retryDuration)) + retryOptions.DurationInSeconds = aws.Int32(int32(retryDuration)) } return retryOptions } -func expandRedshiftRetryOptions(redshift map[string]interface{}) *firehose.RedshiftRetryOptions { - retryOptions := &firehose.RedshiftRetryOptions{} +func expandRedshiftRetryOptions(redshift map[string]interface{}) *types.RedshiftRetryOptions { + retryOptions := &types.RedshiftRetryOptions{} if retryDuration, ok := redshift["retry_duration"].(int); ok { - retryOptions.DurationInSeconds = aws.Int64(int64(retryDuration)) + retryOptions.DurationInSeconds = aws.Int32(int32(retryDuration)) } return retryOptions } -func expandSplunkRetryOptions(splunk map[string]interface{}) *firehose.SplunkRetryOptions { - retryOptions := &firehose.SplunkRetryOptions{} +func expandSplunkRetryOptions(splunk map[string]interface{}) *types.SplunkRetryOptions { + retryOptions := &types.SplunkRetryOptions{} if retryDuration, ok := splunk["retry_duration"].(int); ok { - retryOptions.DurationInSeconds = aws.Int64(int64(retryDuration)) + retryOptions.DurationInSeconds = aws.Int32(int32(retryDuration)) } return retryOptions } -func expandCopyCommand(redshift map[string]interface{}) *firehose.CopyCommand { - cmd := &firehose.CopyCommand{ +func expandCopyCommand(redshift map[string]interface{}) *types.CopyCommand { + cmd := &types.CopyCommand{ DataTableName: aws.String(redshift["data_table_name"].(string)), } if copyOptions, ok := redshift["copy_options"]; ok { @@ -2571,7 +2806,7 @@ func expandCopyCommand(redshift map[string]interface{}) *firehose.CopyCommand { return cmd } -func expandDeliveryStreamEncryptionConfigurationInput(tfList []interface{}) *firehose.DeliveryStreamEncryptionConfigurationInput { +func expandDeliveryStreamEncryptionConfigurationInput(tfList []interface{}) *types.DeliveryStreamEncryptionConfigurationInput { if len(tfList) == 0 { return nil } @@ -2582,25 
+2817,25 @@ func expandDeliveryStreamEncryptionConfigurationInput(tfList []interface{}) *fir return nil } - apiObject := &firehose.DeliveryStreamEncryptionConfigurationInput{} + apiObject := &types.DeliveryStreamEncryptionConfigurationInput{} if v, ok := tfMap["key_arn"].(string); ok && v != "" { apiObject.KeyARN = aws.String(v) } if v, ok := tfMap["key_type"].(string); ok && v != "" { - apiObject.KeyType = aws.String(v) + apiObject.KeyType = types.KeyType(v) } return apiObject } -func expandMSKSourceConfiguration(tfMap map[string]interface{}) *firehose.MSKSourceConfiguration { +func expandMSKSourceConfiguration(tfMap map[string]interface{}) *types.MSKSourceConfiguration { if tfMap == nil { return nil } - apiObject := &firehose.MSKSourceConfiguration{} + apiObject := &types.MSKSourceConfiguration{} if v, ok := tfMap["authentication_configuration"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.AuthenticationConfiguration = expandAuthenticationConfiguration(v[0].(map[string]interface{})) @@ -2617,15 +2852,15 @@ func expandMSKSourceConfiguration(tfMap map[string]interface{}) *firehose.MSKSou return apiObject } -func expandAuthenticationConfiguration(tfMap map[string]interface{}) *firehose.AuthenticationConfiguration { +func expandAuthenticationConfiguration(tfMap map[string]interface{}) *types.AuthenticationConfiguration { if tfMap == nil { return nil } - apiObject := &firehose.AuthenticationConfiguration{} + apiObject := &types.AuthenticationConfiguration{} if v, ok := tfMap["connectivity"].(string); ok && v != "" { - apiObject.Connectivity = aws.String(v) + apiObject.Connectivity = types.Connectivity(v) } if v, ok := tfMap["role_arn"].(string); ok && v != "" { @@ -2635,7 +2870,7 @@ func expandAuthenticationConfiguration(tfMap map[string]interface{}) *firehose.A return apiObject } -func flattenMSKSourceDescription(apiObject *firehose.MSKSourceDescription) map[string]interface{} { +func flattenMSKSourceDescription(apiObject *types.MSKSourceDescription) map[string]interface{} { if apiObject == nil { return nil } @@ -2647,284 +2882,291 @@ func flattenMSKSourceDescription(apiObject *firehose.MSKSourceDescription) map[s } if v := apiObject.MSKClusterARN; v != nil { - tfMap["msk_cluster_arn"] = aws.StringValue(v) + tfMap["msk_cluster_arn"] = aws.ToString(v) } if v := apiObject.TopicName; v != nil { - tfMap["topic_name"] = aws.StringValue(v) + tfMap["topic_name"] = aws.ToString(v) } return tfMap } -func flattenAuthenticationConfiguration(apiObject *firehose.AuthenticationConfiguration) map[string]interface{} { +func flattenAuthenticationConfiguration(apiObject *types.AuthenticationConfiguration) map[string]interface{} { if apiObject == nil { return nil } - tfMap := map[string]interface{}{} - - if v := apiObject.Connectivity; v != nil { - tfMap["connectivity"] = aws.StringValue(v) + tfMap := map[string]interface{}{ + "connectivity": apiObject.Connectivity, } if v := apiObject.RoleARN; v != nil { - tfMap["role_arn"] = aws.StringValue(v) + tfMap["role_arn"] = aws.ToString(v) } return tfMap } -func flattenCloudWatchLoggingOptions(clo *firehose.CloudWatchLoggingOptions) []interface{} { +func flattenCloudWatchLoggingOptions(clo *types.CloudWatchLoggingOptions) []interface{} { if clo == nil { return []interface{}{} } cloudwatchLoggingOptions := map[string]interface{}{ - "enabled": aws.BoolValue(clo.Enabled), + "enabled": aws.ToBool(clo.Enabled), } - if aws.BoolValue(clo.Enabled) { - cloudwatchLoggingOptions["log_group_name"] = aws.StringValue(clo.LogGroupName) - 
cloudwatchLoggingOptions["log_stream_name"] = aws.StringValue(clo.LogStreamName) + if aws.ToBool(clo.Enabled) { + cloudwatchLoggingOptions["log_group_name"] = aws.ToString(clo.LogGroupName) + cloudwatchLoggingOptions["log_stream_name"] = aws.ToString(clo.LogStreamName) } return []interface{}{cloudwatchLoggingOptions} } -func flattenElasticsearchDestinationDescription(description *firehose.ElasticsearchDestinationDescription) []map[string]interface{} { +func flattenElasticsearchDestinationDescription(description *types.ElasticsearchDestinationDescription) []map[string]interface{} { if description == nil { return []map[string]interface{}{} } m := map[string]interface{}{ "cloudwatch_logging_options": flattenCloudWatchLoggingOptions(description.CloudWatchLoggingOptions), - "role_arn": aws.StringValue(description.RoleARN), - "type_name": aws.StringValue(description.TypeName), - "index_name": aws.StringValue(description.IndexName), - "s3_backup_mode": aws.StringValue(description.S3BackupMode), + "role_arn": aws.ToString(description.RoleARN), + "type_name": aws.ToString(description.TypeName), + "index_name": aws.ToString(description.IndexName), + "s3_backup_mode": description.S3BackupMode, "s3_configuration": flattenS3DestinationDescription(description.S3DestinationDescription), - "index_rotation_period": aws.StringValue(description.IndexRotationPeriod), + "index_rotation_period": description.IndexRotationPeriod, "vpc_config": flattenVPCConfigurationDescription(description.VpcConfigurationDescription), - "processing_configuration": flattenProcessingConfiguration(description.ProcessingConfiguration, aws.StringValue(description.RoleARN)), + "processing_configuration": flattenProcessingConfiguration(description.ProcessingConfiguration, destinationTypeElasticsearch, aws.ToString(description.RoleARN)), } if description.DomainARN != nil { - m["domain_arn"] = aws.StringValue(description.DomainARN) + m["domain_arn"] = aws.ToString(description.DomainARN) } if description.ClusterEndpoint != nil { - m["cluster_endpoint"] = aws.StringValue(description.ClusterEndpoint) + m["cluster_endpoint"] = aws.ToString(description.ClusterEndpoint) } if description.BufferingHints != nil { - m["buffering_interval"] = int(aws.Int64Value(description.BufferingHints.IntervalInSeconds)) - m["buffering_size"] = int(aws.Int64Value(description.BufferingHints.SizeInMBs)) + m["buffering_interval"] = int(aws.ToInt32(description.BufferingHints.IntervalInSeconds)) + m["buffering_size"] = int(aws.ToInt32(description.BufferingHints.SizeInMBs)) } if description.RetryOptions != nil { - m["retry_duration"] = int(aws.Int64Value(description.RetryOptions.DurationInSeconds)) + m["retry_duration"] = int(aws.ToInt32(description.RetryOptions.DurationInSeconds)) } return []map[string]interface{}{m} } -func flattenAmazonopensearchserviceDestinationDescription(description *firehose.AmazonopensearchserviceDestinationDescription) []map[string]interface{} { +func flattenAmazonopensearchserviceDestinationDescription(description *types.AmazonopensearchserviceDestinationDescription) []map[string]interface{} { if description == nil { return []map[string]interface{}{} } m := map[string]interface{}{ "cloudwatch_logging_options": flattenCloudWatchLoggingOptions(description.CloudWatchLoggingOptions), - "role_arn": aws.StringValue(description.RoleARN), - "type_name": aws.StringValue(description.TypeName), - "index_name": aws.StringValue(description.IndexName), - "s3_backup_mode": aws.StringValue(description.S3BackupMode), + "role_arn": 
aws.ToString(description.RoleARN), + "type_name": aws.ToString(description.TypeName), + "index_name": aws.ToString(description.IndexName), + "s3_backup_mode": description.S3BackupMode, "s3_configuration": flattenS3DestinationDescription(description.S3DestinationDescription), - "index_rotation_period": aws.StringValue(description.IndexRotationPeriod), + "index_rotation_period": description.IndexRotationPeriod, "vpc_config": flattenVPCConfigurationDescription(description.VpcConfigurationDescription), - "processing_configuration": flattenProcessingConfiguration(description.ProcessingConfiguration, aws.StringValue(description.RoleARN)), + "processing_configuration": flattenProcessingConfiguration(description.ProcessingConfiguration, destinationTypeOpenSearch, aws.ToString(description.RoleARN)), } if description.DomainARN != nil { - m["domain_arn"] = aws.StringValue(description.DomainARN) + m["domain_arn"] = aws.ToString(description.DomainARN) } if description.ClusterEndpoint != nil { - m["cluster_endpoint"] = aws.StringValue(description.ClusterEndpoint) + m["cluster_endpoint"] = aws.ToString(description.ClusterEndpoint) } if description.BufferingHints != nil { - m["buffering_interval"] = int(aws.Int64Value(description.BufferingHints.IntervalInSeconds)) - m["buffering_size"] = int(aws.Int64Value(description.BufferingHints.SizeInMBs)) + m["buffering_interval"] = int(aws.ToInt32(description.BufferingHints.IntervalInSeconds)) + m["buffering_size"] = int(aws.ToInt32(description.BufferingHints.SizeInMBs)) } if description.RetryOptions != nil { - m["retry_duration"] = int(aws.Int64Value(description.RetryOptions.DurationInSeconds)) + m["retry_duration"] = int(aws.ToInt32(description.RetryOptions.DurationInSeconds)) + } + + if v := description.DocumentIdOptions; v != nil { + m["document_id_options"] = []interface{}{flattenDocumentIDOptions(v)} } return []map[string]interface{}{m} } -func flattenAmazonOpenSearchServerlessDestinationDescription(description *firehose.AmazonOpenSearchServerlessDestinationDescription) []map[string]interface{} { +func flattenAmazonOpenSearchServerlessDestinationDescription(description *types.AmazonOpenSearchServerlessDestinationDescription) []map[string]interface{} { if description == nil { return []map[string]interface{}{} } m := map[string]interface{}{ "cloudwatch_logging_options": flattenCloudWatchLoggingOptions(description.CloudWatchLoggingOptions), - "role_arn": aws.StringValue(description.RoleARN), - "index_name": aws.StringValue(description.IndexName), - "s3_backup_mode": aws.StringValue(description.S3BackupMode), + "role_arn": aws.ToString(description.RoleARN), + "index_name": aws.ToString(description.IndexName), + "s3_backup_mode": description.S3BackupMode, "s3_configuration": flattenS3DestinationDescription(description.S3DestinationDescription), "vpc_config": flattenVPCConfigurationDescription(description.VpcConfigurationDescription), - "processing_configuration": flattenProcessingConfiguration(description.ProcessingConfiguration, aws.StringValue(description.RoleARN)), + "processing_configuration": flattenProcessingConfiguration(description.ProcessingConfiguration, destinationTypeOpenSearchServerless, aws.ToString(description.RoleARN)), } if description.CollectionEndpoint != nil { - m["collection_endpoint"] = aws.StringValue(description.CollectionEndpoint) + m["collection_endpoint"] = aws.ToString(description.CollectionEndpoint) } if description.BufferingHints != nil { - m["buffering_interval"] = int(aws.Int64Value(description.BufferingHints.IntervalInSeconds)) - 
m["buffering_size"] = int(aws.Int64Value(description.BufferingHints.SizeInMBs)) + m["buffering_interval"] = int(aws.ToInt32(description.BufferingHints.IntervalInSeconds)) + m["buffering_size"] = int(aws.ToInt32(description.BufferingHints.SizeInMBs)) } if description.RetryOptions != nil { - m["retry_duration"] = int(aws.Int64Value(description.RetryOptions.DurationInSeconds)) + m["retry_duration"] = int(aws.ToInt32(description.RetryOptions.DurationInSeconds)) } return []map[string]interface{}{m} } -func flattenVPCConfigurationDescription(description *firehose.VpcConfigurationDescription) []map[string]interface{} { +func flattenVPCConfigurationDescription(description *types.VpcConfigurationDescription) []map[string]interface{} { if description == nil { return []map[string]interface{}{} } m := map[string]interface{}{ - "vpc_id": aws.StringValue(description.VpcId), - "subnet_ids": flex.FlattenStringSet(description.SubnetIds), - "security_group_ids": flex.FlattenStringSet(description.SecurityGroupIds), - "role_arn": aws.StringValue(description.RoleARN), + "vpc_id": aws.ToString(description.VpcId), + "subnet_ids": description.SubnetIds, + "security_group_ids": description.SecurityGroupIds, + "role_arn": aws.ToString(description.RoleARN), } return []map[string]interface{}{m} } -func flattenExtendedS3DestinationDescription(description *firehose.ExtendedS3DestinationDescription) []map[string]interface{} { +func flattenExtendedS3DestinationDescription(description *types.ExtendedS3DestinationDescription) []map[string]interface{} { if description == nil { return []map[string]interface{}{} } m := map[string]interface{}{ - "bucket_arn": aws.StringValue(description.BucketARN), + "bucket_arn": aws.ToString(description.BucketARN), "cloudwatch_logging_options": flattenCloudWatchLoggingOptions(description.CloudWatchLoggingOptions), - "compression_format": aws.StringValue(description.CompressionFormat), + "compression_format": description.CompressionFormat, "data_format_conversion_configuration": flattenDataFormatConversionConfiguration(description.DataFormatConversionConfiguration), - "error_output_prefix": aws.StringValue(description.ErrorOutputPrefix), - "prefix": aws.StringValue(description.Prefix), - "processing_configuration": flattenProcessingConfiguration(description.ProcessingConfiguration, aws.StringValue(description.RoleARN)), + "error_output_prefix": aws.ToString(description.ErrorOutputPrefix), + "prefix": aws.ToString(description.Prefix), + "processing_configuration": flattenProcessingConfiguration(description.ProcessingConfiguration, destinationTypeExtendedS3, aws.ToString(description.RoleARN)), "dynamic_partitioning_configuration": flattenDynamicPartitioningConfiguration(description.DynamicPartitioningConfiguration), - "role_arn": aws.StringValue(description.RoleARN), + "role_arn": aws.ToString(description.RoleARN), "s3_backup_configuration": flattenS3DestinationDescription(description.S3BackupDescription), - "s3_backup_mode": aws.StringValue(description.S3BackupMode), + "s3_backup_mode": description.S3BackupMode, } if description.BufferingHints != nil { - m["buffering_interval"] = int(aws.Int64Value(description.BufferingHints.IntervalInSeconds)) - m["buffering_size"] = int(aws.Int64Value(description.BufferingHints.SizeInMBs)) + m["buffering_interval"] = int(aws.ToInt32(description.BufferingHints.IntervalInSeconds)) + m["buffering_size"] = int(aws.ToInt32(description.BufferingHints.SizeInMBs)) } if description.EncryptionConfiguration != nil && 
description.EncryptionConfiguration.KMSEncryptionConfig != nil { - m["kms_key_arn"] = aws.StringValue(description.EncryptionConfiguration.KMSEncryptionConfig.AWSKMSKeyARN) + m["kms_key_arn"] = aws.ToString(description.EncryptionConfiguration.KMSEncryptionConfig.AWSKMSKeyARN) } return []map[string]interface{}{m} } -func flattenRedshiftDestinationDescription(description *firehose.RedshiftDestinationDescription, configuredPassword string) []map[string]interface{} { +func flattenRedshiftDestinationDescription(description *types.RedshiftDestinationDescription, configuredPassword string) []map[string]interface{} { if description == nil { return []map[string]interface{}{} } m := map[string]interface{}{ "cloudwatch_logging_options": flattenCloudWatchLoggingOptions(description.CloudWatchLoggingOptions), - "cluster_jdbcurl": aws.StringValue(description.ClusterJDBCURL), + "cluster_jdbcurl": aws.ToString(description.ClusterJDBCURL), "password": configuredPassword, - "processing_configuration": flattenProcessingConfiguration(description.ProcessingConfiguration, aws.StringValue(description.RoleARN)), - "role_arn": aws.StringValue(description.RoleARN), + "processing_configuration": flattenProcessingConfiguration(description.ProcessingConfiguration, destinationTypeRedshift, aws.ToString(description.RoleARN)), + "role_arn": aws.ToString(description.RoleARN), "s3_backup_configuration": flattenS3DestinationDescription(description.S3BackupDescription), - "s3_backup_mode": aws.StringValue(description.S3BackupMode), + "s3_backup_mode": description.S3BackupMode, "s3_configuration": flattenS3DestinationDescription(description.S3DestinationDescription), - "username": aws.StringValue(description.Username), + "username": aws.ToString(description.Username), } if description.CopyCommand != nil { - m["copy_options"] = aws.StringValue(description.CopyCommand.CopyOptions) - m["data_table_columns"] = aws.StringValue(description.CopyCommand.DataTableColumns) - m["data_table_name"] = aws.StringValue(description.CopyCommand.DataTableName) + m["copy_options"] = aws.ToString(description.CopyCommand.CopyOptions) + m["data_table_columns"] = aws.ToString(description.CopyCommand.DataTableColumns) + m["data_table_name"] = aws.ToString(description.CopyCommand.DataTableName) } if description.RetryOptions != nil { - m["retry_duration"] = int(aws.Int64Value(description.RetryOptions.DurationInSeconds)) + m["retry_duration"] = int(aws.ToInt32(description.RetryOptions.DurationInSeconds)) } return []map[string]interface{}{m} } -func flattenSplunkDestinationDescription(description *firehose.SplunkDestinationDescription) []map[string]interface{} { +func flattenSplunkDestinationDescription(description *types.SplunkDestinationDescription) []map[string]interface{} { if description == nil { return []map[string]interface{}{} } m := map[string]interface{}{ "cloudwatch_logging_options": flattenCloudWatchLoggingOptions(description.CloudWatchLoggingOptions), - "hec_acknowledgment_timeout": int(aws.Int64Value(description.HECAcknowledgmentTimeoutInSeconds)), - "hec_endpoint_type": aws.StringValue(description.HECEndpointType), - "hec_endpoint": aws.StringValue(description.HECEndpoint), - "hec_token": aws.StringValue(description.HECToken), - "processing_configuration": flattenProcessingConfiguration(description.ProcessingConfiguration, ""), - "s3_backup_mode": aws.StringValue(description.S3BackupMode), + "hec_acknowledgment_timeout": int(aws.ToInt32(description.HECAcknowledgmentTimeoutInSeconds)), + "hec_endpoint_type": description.HECEndpointType, + 
"hec_endpoint": aws.ToString(description.HECEndpoint), + "hec_token": aws.ToString(description.HECToken), + "processing_configuration": flattenProcessingConfiguration(description.ProcessingConfiguration, destinationTypeSplunk, ""), + "s3_backup_mode": description.S3BackupMode, "s3_configuration": flattenS3DestinationDescription(description.S3DestinationDescription), } + if description.BufferingHints != nil { + m["buffering_interval"] = int(aws.ToInt32(description.BufferingHints.IntervalInSeconds)) + m["buffering_size"] = int(aws.ToInt32(description.BufferingHints.SizeInMBs)) + } + if description.RetryOptions != nil { - m["retry_duration"] = int(aws.Int64Value(description.RetryOptions.DurationInSeconds)) + m["retry_duration"] = int(aws.ToInt32(description.RetryOptions.DurationInSeconds)) } return []map[string]interface{}{m} } -func flattenS3DestinationDescription(description *firehose.S3DestinationDescription) []map[string]interface{} { +func flattenS3DestinationDescription(description *types.S3DestinationDescription) []map[string]interface{} { if description == nil { return []map[string]interface{}{} } m := map[string]interface{}{ - "bucket_arn": aws.StringValue(description.BucketARN), + "bucket_arn": aws.ToString(description.BucketARN), "cloudwatch_logging_options": flattenCloudWatchLoggingOptions(description.CloudWatchLoggingOptions), - "compression_format": aws.StringValue(description.CompressionFormat), - "error_output_prefix": aws.StringValue(description.ErrorOutputPrefix), - "prefix": aws.StringValue(description.Prefix), - "role_arn": aws.StringValue(description.RoleARN), + "compression_format": description.CompressionFormat, + "error_output_prefix": aws.ToString(description.ErrorOutputPrefix), + "prefix": aws.ToString(description.Prefix), + "role_arn": aws.ToString(description.RoleARN), } if description.BufferingHints != nil { - m["buffering_interval"] = int(aws.Int64Value(description.BufferingHints.IntervalInSeconds)) - m["buffering_size"] = int(aws.Int64Value(description.BufferingHints.SizeInMBs)) + m["buffering_interval"] = int(aws.ToInt32(description.BufferingHints.IntervalInSeconds)) + m["buffering_size"] = int(aws.ToInt32(description.BufferingHints.SizeInMBs)) } if description.EncryptionConfiguration != nil && description.EncryptionConfiguration.KMSEncryptionConfig != nil { - m["kms_key_arn"] = aws.StringValue(description.EncryptionConfiguration.KMSEncryptionConfig.AWSKMSKeyARN) + m["kms_key_arn"] = aws.ToString(description.EncryptionConfiguration.KMSEncryptionConfig.AWSKMSKeyARN) } return []map[string]interface{}{m} } -func flattenDataFormatConversionConfiguration(dfcc *firehose.DataFormatConversionConfiguration) []map[string]interface{} { +func flattenDataFormatConversionConfiguration(dfcc *types.DataFormatConversionConfiguration) []map[string]interface{} { if dfcc == nil { return []map[string]interface{}{} } - enabled := aws.BoolValue(dfcc.Enabled) + enabled := aws.ToBool(dfcc.Enabled) ifc := flattenInputFormatConfiguration(dfcc.InputFormatConfiguration) ofc := flattenOutputFormatConfiguration(dfcc.OutputFormatConfiguration) sc := flattenSchemaConfiguration(dfcc.SchemaConfiguration) @@ -2948,7 +3190,7 @@ func flattenDataFormatConversionConfiguration(dfcc *firehose.DataFormatConversio return []map[string]interface{}{m} } -func flattenInputFormatConfiguration(ifc *firehose.InputFormatConfiguration) []map[string]interface{} { +func flattenInputFormatConfiguration(ifc *types.InputFormatConfiguration) []map[string]interface{} { if ifc == nil { return []map[string]interface{}{} } 
@@ -2960,7 +3202,7 @@ func flattenInputFormatConfiguration(ifc *firehose.InputFormatConfiguration) []m return []map[string]interface{}{m} } -func flattenDeserializer(deserializer *firehose.Deserializer) []map[string]interface{} { +func flattenDeserializer(deserializer *types.Deserializer) []map[string]interface{} { if deserializer == nil { return []map[string]interface{}{} } @@ -2973,26 +3215,26 @@ func flattenDeserializer(deserializer *firehose.Deserializer) []map[string]inter return []map[string]interface{}{m} } -func flattenHiveJSONSerDe(hjsd *firehose.HiveJsonSerDe) []map[string]interface{} { +func flattenHiveJSONSerDe(hjsd *types.HiveJsonSerDe) []map[string]interface{} { if hjsd == nil { return []map[string]interface{}{} } m := map[string]interface{}{ - "timestamp_formats": flex.FlattenStringList(hjsd.TimestampFormats), + "timestamp_formats": hjsd.TimestampFormats, } return []map[string]interface{}{m} } -func flattenOpenXJSONSerDe(oxjsd *firehose.OpenXJsonSerDe) []map[string]interface{} { +func flattenOpenXJSONSerDe(oxjsd *types.OpenXJsonSerDe) []map[string]interface{} { if oxjsd == nil { return []map[string]interface{}{} } m := map[string]interface{}{ - "column_to_json_key_mappings": aws.StringValueMap(oxjsd.ColumnToJsonKeyMappings), - "convert_dots_in_json_keys_to_underscores": aws.BoolValue(oxjsd.ConvertDotsInJsonKeysToUnderscores), + "column_to_json_key_mappings": oxjsd.ColumnToJsonKeyMappings, + "convert_dots_in_json_keys_to_underscores": aws.ToBool(oxjsd.ConvertDotsInJsonKeysToUnderscores), } // API omits default values @@ -3000,13 +3242,13 @@ func flattenOpenXJSONSerDe(oxjsd *firehose.OpenXJsonSerDe) []map[string]interfac m["case_insensitive"] = true if oxjsd.CaseInsensitive != nil { - m["case_insensitive"] = aws.BoolValue(oxjsd.CaseInsensitive) + m["case_insensitive"] = aws.ToBool(oxjsd.CaseInsensitive) } return []map[string]interface{}{m} } -func flattenOutputFormatConfiguration(ofc *firehose.OutputFormatConfiguration) []map[string]interface{} { +func flattenOutputFormatConfiguration(ofc *types.OutputFormatConfiguration) []map[string]interface{} { if ofc == nil { return []map[string]interface{}{} } @@ -3018,7 +3260,7 @@ func flattenOutputFormatConfiguration(ofc *firehose.OutputFormatConfiguration) [ return []map[string]interface{}{m} } -func flattenSerializer(serializer *firehose.Serializer) []map[string]interface{} { +func flattenSerializer(serializer *types.Serializer) []map[string]interface{} { if serializer == nil { return []map[string]interface{}{} } @@ -3031,15 +3273,15 @@ func flattenSerializer(serializer *firehose.Serializer) []map[string]interface{} return []map[string]interface{}{m} } -func flattenOrcSerDe(osd *firehose.OrcSerDe) []map[string]interface{} { +func flattenOrcSerDe(osd *types.OrcSerDe) []map[string]interface{} { if osd == nil { return []map[string]interface{}{} } m := map[string]interface{}{ - "bloom_filter_columns": aws.StringValueSlice(osd.BloomFilterColumns), - "dictionary_key_threshold": aws.Float64Value(osd.DictionaryKeyThreshold), - "enable_padding": aws.BoolValue(osd.EnablePadding), + "bloom_filter_columns": osd.BloomFilterColumns, + "dictionary_key_threshold": aws.ToFloat64(osd.DictionaryKeyThreshold), + "enable_padding": aws.ToBool(osd.EnablePadding), } // API omits default values @@ -3047,50 +3289,50 @@ func flattenOrcSerDe(osd *firehose.OrcSerDe) []map[string]interface{} { m["block_size_bytes"] = 268435456 if osd.BlockSizeBytes != nil { - m["block_size_bytes"] = int(aws.Int64Value(osd.BlockSizeBytes)) + m["block_size_bytes"] = 
int(aws.ToInt32(osd.BlockSizeBytes)) } m["bloom_filter_false_positive_probability"] = 0.05 if osd.BloomFilterFalsePositiveProbability != nil { - m["bloom_filter_false_positive_probability"] = aws.Float64Value(osd.BloomFilterFalsePositiveProbability) + m["bloom_filter_false_positive_probability"] = aws.ToFloat64(osd.BloomFilterFalsePositiveProbability) } - m["compression"] = firehose.OrcCompressionSnappy - if osd.Compression != nil { - m["compression"] = aws.StringValue(osd.Compression) + m["compression"] = types.OrcCompressionSnappy + if osd.Compression != "" { + m["compression"] = osd.Compression } - m["format_version"] = firehose.OrcFormatVersionV012 - if osd.FormatVersion != nil { - m["format_version"] = aws.StringValue(osd.FormatVersion) + m["format_version"] = types.OrcFormatVersionV012 + if osd.FormatVersion != "" { + m["format_version"] = osd.FormatVersion } m["padding_tolerance"] = 0.05 if osd.PaddingTolerance != nil { - m["padding_tolerance"] = aws.Float64Value(osd.PaddingTolerance) + m["padding_tolerance"] = aws.ToFloat64(osd.PaddingTolerance) } m["row_index_stride"] = 10000 if osd.RowIndexStride != nil { - m["row_index_stride"] = int(aws.Int64Value(osd.RowIndexStride)) + m["row_index_stride"] = int(aws.ToInt32(osd.RowIndexStride)) } m["stripe_size_bytes"] = 67108864 if osd.StripeSizeBytes != nil { - m["stripe_size_bytes"] = int(aws.Int64Value(osd.StripeSizeBytes)) + m["stripe_size_bytes"] = int(aws.ToInt32(osd.StripeSizeBytes)) } return []map[string]interface{}{m} } -func flattenParquetSerDe(psd *firehose.ParquetSerDe) []map[string]interface{} { +func flattenParquetSerDe(psd *types.ParquetSerDe) []map[string]interface{} { if psd == nil { return []map[string]interface{}{} } m := map[string]interface{}{ - "enable_dictionary_compression": aws.BoolValue(psd.EnableDictionaryCompression), - "max_padding_bytes": int(aws.Int64Value(psd.MaxPaddingBytes)), + "enable_dictionary_compression": aws.ToBool(psd.EnableDictionaryCompression), + "max_padding_bytes": int(aws.ToInt32(psd.MaxPaddingBytes)), } // API omits default values @@ -3098,45 +3340,45 @@ func flattenParquetSerDe(psd *firehose.ParquetSerDe) []map[string]interface{} { m["block_size_bytes"] = 268435456 if psd.BlockSizeBytes != nil { - m["block_size_bytes"] = int(aws.Int64Value(psd.BlockSizeBytes)) + m["block_size_bytes"] = int(aws.ToInt32(psd.BlockSizeBytes)) } - m["compression"] = firehose.ParquetCompressionSnappy - if psd.Compression != nil { - m["compression"] = aws.StringValue(psd.Compression) + m["compression"] = types.ParquetCompressionSnappy + if psd.Compression != "" { + m["compression"] = psd.Compression } m["page_size_bytes"] = 1048576 if psd.PageSizeBytes != nil { - m["page_size_bytes"] = int(aws.Int64Value(psd.PageSizeBytes)) + m["page_size_bytes"] = int(aws.ToInt32(psd.PageSizeBytes)) } - m["writer_version"] = firehose.ParquetWriterVersionV1 - if psd.WriterVersion != nil { - m["writer_version"] = aws.StringValue(psd.WriterVersion) + m["writer_version"] = types.ParquetWriterVersionV1 + if psd.WriterVersion != "" { + m["writer_version"] = psd.WriterVersion } return []map[string]interface{}{m} } -func flattenSchemaConfiguration(sc *firehose.SchemaConfiguration) []map[string]interface{} { +func flattenSchemaConfiguration(sc *types.SchemaConfiguration) []map[string]interface{} { if sc == nil { return []map[string]interface{}{} } m := map[string]interface{}{ - "catalog_id": aws.StringValue(sc.CatalogId), - "database_name": aws.StringValue(sc.DatabaseName), - "region": aws.StringValue(sc.Region), - "role_arn": 
aws.StringValue(sc.RoleARN), - "table_name": aws.StringValue(sc.TableName), - "version_id": aws.StringValue(sc.VersionId), + "catalog_id": aws.ToString(sc.CatalogId), + "database_name": aws.ToString(sc.DatabaseName), + "region": aws.ToString(sc.Region), + "role_arn": aws.ToString(sc.RoleARN), + "table_name": aws.ToString(sc.TableName), + "version_id": aws.ToString(sc.VersionId), } return []map[string]interface{}{m} } -func flattenHTTPEndpointRequestConfiguration(rc *firehose.HttpEndpointRequestConfiguration) []map[string]interface{} { +func flattenHTTPEndpointRequestConfiguration(rc *types.HttpEndpointRequestConfiguration) []map[string]interface{} { if rc == nil { return []map[string]interface{}{} } @@ -3145,8 +3387,8 @@ func flattenHTTPEndpointRequestConfiguration(rc *firehose.HttpEndpointRequestCon commonAttributes := make([]interface{}, 0) for _, params := range rc.CommonAttributes { - name := aws.StringValue(params.AttributeName) - value := aws.StringValue(params.AttributeValue) + name := aws.ToString(params.AttributeName) + value := aws.ToString(params.AttributeValue) commonAttributes = append(commonAttributes, map[string]interface{}{ "name": name, @@ -3156,43 +3398,36 @@ func flattenHTTPEndpointRequestConfiguration(rc *firehose.HttpEndpointRequestCon requestConfiguration[0] = map[string]interface{}{ "common_attributes": commonAttributes, - "content_encoding": aws.StringValue(rc.ContentEncoding), + "content_encoding": rc.ContentEncoding, } return requestConfiguration } -func flattenProcessingConfiguration(pc *firehose.ProcessingConfiguration, roleArn string) []map[string]interface{} { +func flattenProcessingConfiguration(pc *types.ProcessingConfiguration, destinationType destinationType, roleARN string) []map[string]interface{} { if pc == nil { return []map[string]interface{}{} } processingConfiguration := make([]map[string]interface{}, 1) - // It is necessary to explicitly filter this out - // to prevent diffs during routine use and retain the ability - // to show diffs if any field has drifted - defaultLambdaParams := map[string]string{ - "NumberOfRetries": "3", - "RoleArn": roleArn, - "BufferSizeInMBs": "3", - "BufferIntervalInSeconds": "60", - } - processors := make([]interface{}, len(pc.Processors)) for i, p := range pc.Processors { - t := aws.StringValue(p.Type) + t := p.Type parameters := make([]interface{}, 0) + // It is necessary to explicitly filter this out + // to prevent diffs during routine use and retain the ability + // to show diffs if any field has drifted. + defaultProcessorParameters := defaultProcessorParameters(destinationType, t, roleARN) + for _, params := range p.Parameters { - name := aws.StringValue(params.ParameterName) - value := aws.StringValue(params.ParameterValue) + name := params.ParameterName + value := aws.ToString(params.ParameterValue) - if t == firehose.ProcessorTypeLambda { - // Ignore defaults - if v, ok := defaultLambdaParams[name]; ok && v == value { - continue - } + // Ignore defaults. 
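+ // The filtered defaults are destination-aware: see defaultProcessorParameters
+ // below, which for Lambda processors uses a BufferSizeInMBs default of "0.25"
+ // for Splunk destinations and "1" for all others, and only includes RoleArn
+ // when a non-empty role ARN was passed in.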
+ if v, ok := defaultProcessorParameters[name]; ok && v == value { + continue } parameters = append(parameters, map[string]interface{}{ @@ -3207,13 +3442,13 @@ func flattenProcessingConfiguration(pc *firehose.ProcessingConfiguration, roleAr } } processingConfiguration[0] = map[string]interface{}{ - "enabled": aws.BoolValue(pc.Enabled), + "enabled": aws.ToBool(pc.Enabled), "processors": processors, } return processingConfiguration } -func flattenDynamicPartitioningConfiguration(dpc *firehose.DynamicPartitioningConfiguration) []map[string]interface{} { +func flattenDynamicPartitioningConfiguration(dpc *types.DynamicPartitioningConfiguration) []map[string]interface{} { if dpc == nil { return []map[string]interface{}{} } @@ -3221,57 +3456,83 @@ func flattenDynamicPartitioningConfiguration(dpc *firehose.DynamicPartitioningCo dynamicPartitioningConfiguration := make([]map[string]interface{}, 1) dynamicPartitioningConfiguration[0] = map[string]interface{}{ - "enabled": aws.BoolValue(dpc.Enabled), + "enabled": aws.ToBool(dpc.Enabled), } if dpc.RetryOptions != nil && dpc.RetryOptions.DurationInSeconds != nil { - dynamicPartitioningConfiguration[0]["retry_duration"] = int(aws.Int64Value(dpc.RetryOptions.DurationInSeconds)) + dynamicPartitioningConfiguration[0]["retry_duration"] = int(aws.ToInt32(dpc.RetryOptions.DurationInSeconds)) } return dynamicPartitioningConfiguration } -func flattenKinesisStreamSourceDescription(desc *firehose.KinesisStreamSourceDescription) []interface{} { +func flattenKinesisStreamSourceDescription(desc *types.KinesisStreamSourceDescription) []interface{} { if desc == nil { return []interface{}{} } mDesc := map[string]interface{}{ - "kinesis_stream_arn": aws.StringValue(desc.KinesisStreamARN), - "role_arn": aws.StringValue(desc.RoleARN), + "kinesis_stream_arn": aws.ToString(desc.KinesisStreamARN), + "role_arn": aws.ToString(desc.RoleARN), } return []interface{}{mDesc} } -func flattenHTTPEndpointDestinationDescription(description *firehose.HttpEndpointDestinationDescription, configuredAccessKey string) []map[string]interface{} { +func flattenHTTPEndpointDestinationDescription(description *types.HttpEndpointDestinationDescription, configuredAccessKey string) []map[string]interface{} { if description == nil { return []map[string]interface{}{} } m := map[string]interface{}{ "access_key": configuredAccessKey, - "url": aws.StringValue(description.EndpointConfiguration.Url), - "name": aws.StringValue(description.EndpointConfiguration.Name), - "role_arn": aws.StringValue(description.RoleARN), - "s3_backup_mode": aws.StringValue(description.S3BackupMode), + "url": aws.ToString(description.EndpointConfiguration.Url), + "name": aws.ToString(description.EndpointConfiguration.Name), + "role_arn": aws.ToString(description.RoleARN), + "s3_backup_mode": description.S3BackupMode, "s3_configuration": flattenS3DestinationDescription(description.S3DestinationDescription), "request_configuration": flattenHTTPEndpointRequestConfiguration(description.RequestConfiguration), "cloudwatch_logging_options": flattenCloudWatchLoggingOptions(description.CloudWatchLoggingOptions), - "processing_configuration": flattenProcessingConfiguration(description.ProcessingConfiguration, aws.StringValue(description.RoleARN)), + "processing_configuration": flattenProcessingConfiguration(description.ProcessingConfiguration, destinationTypeHTTPEndpoint, aws.ToString(description.RoleARN)), } if description.RetryOptions != nil { - m["retry_duration"] = int(aws.Int64Value(description.RetryOptions.DurationInSeconds)) + 
m["retry_duration"] = int(aws.ToInt32(description.RetryOptions.DurationInSeconds)) } if description.BufferingHints != nil { - m["buffering_interval"] = int(aws.Int64Value(description.BufferingHints.IntervalInSeconds)) - m["buffering_size"] = int(aws.Int64Value(description.BufferingHints.SizeInMBs)) + m["buffering_interval"] = int(aws.ToInt32(description.BufferingHints.IntervalInSeconds)) + m["buffering_size"] = int(aws.ToInt32(description.BufferingHints.SizeInMBs)) } return []map[string]interface{}{m} } +func expandDocumentIDOptions(tfMap map[string]interface{}) *types.DocumentIdOptions { + if tfMap == nil { + return nil + } + + apiObject := &types.DocumentIdOptions{} + + if v, ok := tfMap["default_document_id_format"].(string); ok && v != "" { + apiObject.DefaultDocumentIdFormat = types.DefaultDocumentIdFormat(v) + } + + return apiObject +} + +func flattenDocumentIDOptions(apiObject *types.DocumentIdOptions) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{ + "default_document_id_format": apiObject.DefaultDocumentIdFormat, + } + + return tfMap +} + func isDeliveryStreamOptionDisabled(v interface{}) bool { tfList := v.([]interface{}) if len(tfList) == 0 || tfList[0] == nil { @@ -3287,3 +3548,26 @@ func isDeliveryStreamOptionDisabled(v interface{}) bool { return !enabled } + +// See https://docs.aws.amazon.com/firehose/latest/dev/data-transformation.html. +func defaultProcessorParameters(destinationType destinationType, processorType types.ProcessorType, roleARN string) map[types.ProcessorParameterName]string { + switch processorType { + case types.ProcessorTypeLambda: + params := map[types.ProcessorParameterName]string{ + types.ProcessorParameterNameLambdaNumberOfRetries: "3", + types.ProcessorParameterNameBufferIntervalInSeconds: "60", + } + if roleARN != "" { + params[types.ProcessorParameterNameRoleArn] = roleARN + } + switch destinationType { + case destinationTypeSplunk: + params[types.ProcessorParameterNameBufferSizeInMb] = "0.25" + default: + params[types.ProcessorParameterNameBufferSizeInMb] = "1" + } + return params + default: + return make(map[types.ProcessorParameterName]string) + } +} diff --git a/internal/service/firehose/delivery_stream_data_source.go b/internal/service/firehose/delivery_stream_data_source.go index 490705c8723..57a90d71c30 100644 --- a/internal/service/firehose/delivery_stream_data_source.go +++ b/internal/service/firehose/delivery_stream_data_source.go @@ -6,7 +6,7 @@ package firehose import ( "context" - "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -14,9 +14,10 @@ import ( ) // @SDKDataSource("aws_kinesis_firehose_delivery_stream") -func DataSourceDeliveryStream() *schema.Resource { +func dataSourceDeliveryStream() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceDeliveryStreamRead, + Schema: map[string]*schema.Schema{ "arn": { Type: schema.TypeString, @@ -32,16 +33,16 @@ func DataSourceDeliveryStream() *schema.Resource { func dataSourceDeliveryStreamRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).FirehoseConn(ctx) + conn := meta.(*conns.AWSClient).FirehoseClient(ctx) sn := d.Get("name").(string) - output, err := FindDeliveryStreamByName(ctx, conn, sn) + output, err := 
findDeliveryStreamByName(ctx, conn, sn) if err != nil { return sdkdiag.AppendErrorf(diags, "reading Kinesis Firehose Delivery Stream (%s): %s", sn, err) } - d.SetId(aws.StringValue(output.DeliveryStreamARN)) + d.SetId(aws.ToString(output.DeliveryStreamARN)) d.Set("arn", output.DeliveryStreamARN) d.Set("name", output.DeliveryStreamName) diff --git a/internal/service/firehose/delivery_stream_data_source_test.go b/internal/service/firehose/delivery_stream_data_source_test.go index 6310b1512c4..bcbb05d50dc 100644 --- a/internal/service/firehose/delivery_stream_data_source_test.go +++ b/internal/service/firehose/delivery_stream_data_source_test.go @@ -7,10 +7,10 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/service/firehose" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/names" ) func TestAccFirehoseDeliveryStreamDataSource_basic(t *testing.T) { @@ -21,7 +21,7 @@ func TestAccFirehoseDeliveryStreamDataSource_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.FirehoseEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDeliveryStreamDestroy(ctx), Steps: []resource.TestStep{ diff --git a/internal/service/firehose/delivery_stream_test.go b/internal/service/firehose/delivery_stream_test.go index a1fedd51f0f..7ae3d015558 100644 --- a/internal/service/firehose/delivery_stream_test.go +++ b/internal/service/firehose/delivery_stream_test.go @@ -10,9 +10,10 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/firehose" - "github.com/aws/aws-sdk-go/service/lambda" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/firehose" + "github.com/aws/aws-sdk-go-v2/service/firehose/types" + "github.com/aws/aws-sdk-go-v2/service/lambda" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -25,13 +26,13 @@ import ( func TestAccFirehoseDeliveryStream_basic(t *testing.T) { ctx := acctest.Context(t) - var stream firehose.DeliveryStreamDescription + var stream types.DeliveryStreamDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.FirehoseEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDeliveryStreamDestroy(ctx), Steps: []resource.TestStep{ @@ -65,7 +66,7 @@ func TestAccFirehoseDeliveryStream_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.0.processing_configuration.0.processors.0.parameters.0.parameter_name", "LambdaArn"), resource.TestCheckResourceAttrSet(resourceName, "extended_s3_configuration.0.processing_configuration.0.processors.0.parameters.0.parameter_value"), resource.TestCheckResourceAttr(resourceName, 
"extended_s3_configuration.0.processing_configuration.0.processors.0.parameters.1.parameter_name", "BufferSizeInMBs"), - resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.0.processing_configuration.0.processors.0.parameters.1.parameter_value", "1"), + resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.0.processing_configuration.0.processors.0.parameters.1.parameter_value", "1.1"), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.0.processing_configuration.0.processors.0.parameters.2.parameter_name", "BufferIntervalInSeconds"), resource.TestCheckResourceAttr(resourceName, "extended_s3_configuration.0.processing_configuration.0.processors.0.parameters.2.parameter_value", "70"), resource.TestCheckResourceAttrSet(resourceName, "extended_s3_configuration.0.role_arn"), @@ -111,13 +112,13 @@ func TestAccFirehoseDeliveryStream_basic(t *testing.T) { func TestAccFirehoseDeliveryStream_disappears(t *testing.T) { ctx := acctest.Context(t) - var stream firehose.DeliveryStreamDescription + var stream types.DeliveryStreamDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.FirehoseEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDeliveryStreamDestroy(ctx), Steps: []resource.TestStep{ @@ -135,13 +136,13 @@ func TestAccFirehoseDeliveryStream_disappears(t *testing.T) { func TestAccFirehoseDeliveryStream_tags(t *testing.T) { ctx := acctest.Context(t) - var stream firehose.DeliveryStreamDescription + var stream types.DeliveryStreamDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.FirehoseEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDeliveryStreamDestroy(ctx), Steps: []resource.TestStep{ @@ -176,13 +177,13 @@ func TestAccFirehoseDeliveryStream_tags(t *testing.T) { func TestAccFirehoseDeliveryStream_s3WithCloudWatchLogging(t *testing.T) { ctx := acctest.Context(t) - var stream firehose.DeliveryStreamDescription + var stream types.DeliveryStreamDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.FirehoseEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDeliveryStreamDestroy(ctx), Steps: []resource.TestStep{ @@ -199,13 +200,13 @@ func TestAccFirehoseDeliveryStream_s3WithCloudWatchLogging(t *testing.T) { func TestAccFirehoseDeliveryStream_extendedS3basic(t *testing.T) { ctx := acctest.Context(t) - var stream firehose.DeliveryStreamDescription + var stream types.DeliveryStreamDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, 
- ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.FirehoseEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDeliveryStreamDestroy_ExtendedS3(ctx), Steps: []resource.TestStep{ @@ -229,13 +230,13 @@ func TestAccFirehoseDeliveryStream_extendedS3basic(t *testing.T) { func TestAccFirehoseDeliveryStream_ExtendedS3DataFormatConversion_enabled(t *testing.T) { ctx := acctest.Context(t) - var stream firehose.DeliveryStreamDescription + var stream types.DeliveryStreamDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.FirehoseEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDeliveryStreamDestroy_ExtendedS3(ctx), Steps: []resource.TestStep{ @@ -277,13 +278,13 @@ func TestAccFirehoseDeliveryStream_ExtendedS3DataFormatConversion_enabled(t *tes func TestAccFirehoseDeliveryStream_ExtendedS3_externalUpdate(t *testing.T) { ctx := acctest.Context(t) - var stream firehose.DeliveryStreamDescription + var stream types.DeliveryStreamDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.FirehoseEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDeliveryStreamDestroy_ExtendedS3(ctx), Steps: []resource.TestStep{ @@ -303,22 +304,22 @@ func TestAccFirehoseDeliveryStream_ExtendedS3_externalUpdate(t *testing.T) { }, { PreConfig: func() { - conn := acctest.Provider.Meta().(*conns.AWSClient).FirehoseConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).FirehoseClient(ctx) udi := firehose.UpdateDestinationInput{ DeliveryStreamName: aws.String(rName), DestinationId: aws.String("destinationId-000000000001"), CurrentDeliveryStreamVersionId: aws.String("1"), - ExtendedS3DestinationUpdate: &firehose.ExtendedS3DestinationUpdate{ - DataFormatConversionConfiguration: &firehose.DataFormatConversionConfiguration{ + ExtendedS3DestinationUpdate: &types.ExtendedS3DestinationUpdate{ + DataFormatConversionConfiguration: &types.DataFormatConversionConfiguration{ Enabled: aws.Bool(false), }, - ProcessingConfiguration: &firehose.ProcessingConfiguration{ + ProcessingConfiguration: &types.ProcessingConfiguration{ Enabled: aws.Bool(false), - Processors: []*firehose.Processor{}, + Processors: []types.Processor{}, }, }, } - _, err := conn.UpdateDestinationWithContext(ctx, &udi) + _, err := conn.UpdateDestination(ctx, &udi) if err != nil { t.Fatalf("Unable to update firehose destination: %s", err) } @@ -337,13 +338,13 @@ func TestAccFirehoseDeliveryStream_ExtendedS3_externalUpdate(t *testing.T) { func TestAccFirehoseDeliveryStream_ExtendedS3DataFormatConversionDeserializer_update(t *testing.T) { ctx := acctest.Context(t) - var stream firehose.DeliveryStreamDescription + var stream types.DeliveryStreamDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { 
acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.FirehoseEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDeliveryStreamDestroy_ExtendedS3(ctx), Steps: []resource.TestStep{ @@ -380,13 +381,13 @@ func TestAccFirehoseDeliveryStream_ExtendedS3DataFormatConversionDeserializer_up func TestAccFirehoseDeliveryStream_ExtendedS3DataFormatConversionHiveJSONSerDe_empty(t *testing.T) { ctx := acctest.Context(t) - var stream firehose.DeliveryStreamDescription + var stream types.DeliveryStreamDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.FirehoseEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDeliveryStreamDestroy_ExtendedS3(ctx), Steps: []resource.TestStep{ @@ -412,13 +413,13 @@ func TestAccFirehoseDeliveryStream_ExtendedS3DataFormatConversionHiveJSONSerDe_e func TestAccFirehoseDeliveryStream_ExtendedS3DataFormatConversionOpenXJSONSerDe_empty(t *testing.T) { ctx := acctest.Context(t) - var stream firehose.DeliveryStreamDescription + var stream types.DeliveryStreamDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.FirehoseEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDeliveryStreamDestroy_ExtendedS3(ctx), Steps: []resource.TestStep{ @@ -444,13 +445,13 @@ func TestAccFirehoseDeliveryStream_ExtendedS3DataFormatConversionOpenXJSONSerDe_ func TestAccFirehoseDeliveryStream_ExtendedS3DataFormatConversionOrcSerDe_empty(t *testing.T) { ctx := acctest.Context(t) - var stream firehose.DeliveryStreamDescription + var stream types.DeliveryStreamDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.FirehoseEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDeliveryStreamDestroy_ExtendedS3(ctx), Steps: []resource.TestStep{ @@ -476,13 +477,13 @@ func TestAccFirehoseDeliveryStream_ExtendedS3DataFormatConversionOrcSerDe_empty( func TestAccFirehoseDeliveryStream_ExtendedS3DataFormatConversionParquetSerDe_empty(t *testing.T) { ctx := acctest.Context(t) - var stream firehose.DeliveryStreamDescription + var stream types.DeliveryStreamDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.FirehoseEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDeliveryStreamDestroy_ExtendedS3(ctx), Steps: []resource.TestStep{ @@ -508,13 
+509,13 @@ func TestAccFirehoseDeliveryStream_ExtendedS3DataFormatConversionParquetSerDe_em func TestAccFirehoseDeliveryStream_ExtendedS3DataFormatConversionSerializer_update(t *testing.T) { ctx := acctest.Context(t) - var stream firehose.DeliveryStreamDescription + var stream types.DeliveryStreamDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.FirehoseEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDeliveryStreamDestroy_ExtendedS3(ctx), Steps: []resource.TestStep{ @@ -551,13 +552,13 @@ func TestAccFirehoseDeliveryStream_ExtendedS3DataFormatConversionSerializer_upda func TestAccFirehoseDeliveryStream_ExtendedS3_errorOutputPrefix(t *testing.T) { ctx := acctest.Context(t) - var stream firehose.DeliveryStreamDescription + var stream types.DeliveryStreamDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.FirehoseEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDeliveryStreamDestroy_ExtendedS3(ctx), Steps: []resource.TestStep{ @@ -603,13 +604,13 @@ func TestAccFirehoseDeliveryStream_ExtendedS3_errorOutputPrefix(t *testing.T) { func TestAccFirehoseDeliveryStream_ExtendedS3_S3BackupConfiguration_ErrorOutputPrefix(t *testing.T) { ctx := acctest.Context(t) - var stream firehose.DeliveryStreamDescription + var stream types.DeliveryStreamDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.FirehoseEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDeliveryStreamDestroy_ExtendedS3(ctx), Steps: []resource.TestStep{ @@ -655,13 +656,13 @@ func TestAccFirehoseDeliveryStream_ExtendedS3_S3BackupConfiguration_ErrorOutputP // Reference: https://github.com/hashicorp/terraform-provider-aws/issues/12600 func TestAccFirehoseDeliveryStream_ExtendedS3Processing_empty(t *testing.T) { ctx := acctest.Context(t) - var stream firehose.DeliveryStreamDescription + var stream types.DeliveryStreamDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.FirehoseEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDeliveryStreamDestroy_ExtendedS3(ctx), Steps: []resource.TestStep{ @@ -684,13 +685,13 @@ func TestAccFirehoseDeliveryStream_ExtendedS3Processing_empty(t *testing.T) { func TestAccFirehoseDeliveryStream_extendedS3KMSKeyARN(t *testing.T) { ctx := acctest.Context(t) - var stream firehose.DeliveryStreamDescription + var stream types.DeliveryStreamDescription 
resourceName := "aws_kinesis_firehose_delivery_stream.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.FirehoseEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDeliveryStreamDestroy_ExtendedS3(ctx), Steps: []resource.TestStep{ @@ -713,13 +714,13 @@ func TestAccFirehoseDeliveryStream_extendedS3KMSKeyARN(t *testing.T) { func TestAccFirehoseDeliveryStream_extendedS3DynamicPartitioning(t *testing.T) { ctx := acctest.Context(t) - var stream firehose.DeliveryStreamDescription + var stream types.DeliveryStreamDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.FirehoseEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDeliveryStreamDestroy_ExtendedS3(ctx), Steps: []resource.TestStep{ @@ -743,13 +744,13 @@ func TestAccFirehoseDeliveryStream_extendedS3DynamicPartitioning(t *testing.T) { func TestAccFirehoseDeliveryStream_extendedS3DynamicPartitioningUpdate(t *testing.T) { ctx := acctest.Context(t) - var stream firehose.DeliveryStreamDescription + var stream types.DeliveryStreamDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.FirehoseEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDeliveryStreamDestroy_ExtendedS3(ctx), Steps: []resource.TestStep{ @@ -775,47 +776,73 @@ func TestAccFirehoseDeliveryStream_extendedS3DynamicPartitioningUpdate(t *testin func TestAccFirehoseDeliveryStream_extendedS3Updates(t *testing.T) { ctx := acctest.Context(t) - var stream firehose.DeliveryStreamDescription + var stream types.DeliveryStreamDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" - firstUpdateExtendedS3DestinationConfig := &firehose.ExtendedS3DestinationDescription{ - BufferingHints: &firehose.BufferingHints{ - IntervalInSeconds: aws.Int64(400), - SizeInMBs: aws.Int64(10), + firstUpdateExtendedS3DestinationConfig := &types.ExtendedS3DestinationDescription{ + BufferingHints: &types.BufferingHints{ + IntervalInSeconds: aws.Int32(400), + SizeInMBs: aws.Int32(10), }, - ProcessingConfiguration: &firehose.ProcessingConfiguration{ + ProcessingConfiguration: &types.ProcessingConfiguration{ Enabled: aws.Bool(true), - Processors: []*firehose.Processor{ + Processors: []types.Processor{ { - Type: aws.String("Lambda"), - Parameters: []*firehose.ProcessorParameter{ + Type: types.ProcessorTypeLambda, + Parameters: []types.ProcessorParameter{ { - ParameterName: aws.String("LambdaArn"), + ParameterName: types.ProcessorParameterNameLambdaArn, ParameterValue: aws.String("valueNotTested"), }, }, }, }, }, - S3BackupMode: aws.String("Enabled"), + S3BackupMode: types.S3BackupModeEnabled, } - removeProcessorsExtendedS3DestinationConfig := 
&firehose.ExtendedS3DestinationDescription{ - BufferingHints: &firehose.BufferingHints{ - IntervalInSeconds: aws.Int64(400), - SizeInMBs: aws.Int64(10), + secondUpdateExtendedS3DestinationConfig := &types.ExtendedS3DestinationDescription{ + BufferingHints: &types.BufferingHints{ + IntervalInSeconds: aws.Int32(400), + SizeInMBs: aws.Int32(10), }, - ProcessingConfiguration: &firehose.ProcessingConfiguration{ + ProcessingConfiguration: &types.ProcessingConfiguration{ + Enabled: aws.Bool(true), + Processors: []types.Processor{ + { + Type: types.ProcessorTypeLambda, + Parameters: []types.ProcessorParameter{ + { + ParameterName: types.ProcessorParameterNameLambdaArn, + ParameterValue: aws.String("valueNotTested"), + }, + { + ParameterName: types.ProcessorParameterNameBufferIntervalInSeconds, + ParameterValue: aws.String("201"), + }, + }, + }, + }, + }, + S3BackupMode: types.S3BackupModeEnabled, + } + + removeProcessorsExtendedS3DestinationConfig := &types.ExtendedS3DestinationDescription{ + BufferingHints: &types.BufferingHints{ + IntervalInSeconds: aws.Int32(400), + SizeInMBs: aws.Int32(10), + }, + ProcessingConfiguration: &types.ProcessingConfiguration{ Enabled: aws.Bool(false), - Processors: []*firehose.Processor{}, + Processors: []types.Processor{}, }, - S3BackupMode: aws.String("Enabled"), + S3BackupMode: types.S3BackupModeEnabled, } resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.FirehoseEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDeliveryStreamDestroy_ExtendedS3(ctx), Steps: []resource.TestStep{ @@ -838,6 +865,13 @@ func TestAccFirehoseDeliveryStream_extendedS3Updates(t *testing.T) { testAccCheckDeliveryStreamAttributes(&stream, nil, firstUpdateExtendedS3DestinationConfig, nil, nil, nil, nil, nil), ), }, + { + Config: testAccDeliveryStreamConfig_extendedS3UpdatesSetBufferIntervalNoBufferSize(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDeliveryStreamExists(ctx, resourceName, &stream), + testAccCheckDeliveryStreamAttributes(&stream, nil, secondUpdateExtendedS3DestinationConfig, nil, nil, nil, nil, nil), + ), + }, { Config: testAccDeliveryStreamConfig_extendedS3UpdatesRemoveProcessors(rName), Check: resource.ComposeTestCheckFunc( @@ -851,13 +885,13 @@ func TestAccFirehoseDeliveryStream_extendedS3Updates(t *testing.T) { func TestAccFirehoseDeliveryStream_ExtendedS3_kinesisStreamSource(t *testing.T) { ctx := acctest.Context(t) - var stream firehose.DeliveryStreamDescription + var stream types.DeliveryStreamDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.FirehoseEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDeliveryStreamDestroy(ctx), Steps: []resource.TestStep{ @@ -879,13 +913,13 @@ func TestAccFirehoseDeliveryStream_ExtendedS3_kinesisStreamSource(t *testing.T) func TestAccFirehoseDeliveryStream_ExtendedS3_mskClusterSource(t *testing.T) { ctx := acctest.Context(t) - var stream firehose.DeliveryStreamDescription + var stream types.DeliveryStreamDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := 
"aws_kinesis_firehose_delivery_stream.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.FirehoseEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDeliveryStreamDestroy(ctx), Steps: []resource.TestStep{ @@ -913,23 +947,23 @@ func TestAccFirehoseDeliveryStream_ExtendedS3_mskClusterSource(t *testing.T) { func TestAccFirehoseDeliveryStream_redshiftUpdates(t *testing.T) { ctx := acctest.Context(t) - var stream firehose.DeliveryStreamDescription + var stream types.DeliveryStreamDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" - updatedRedshiftConfig := &firehose.RedshiftDestinationDescription{ - CopyCommand: &firehose.CopyCommand{ + updatedRedshiftConfig := &types.RedshiftDestinationDescription{ + CopyCommand: &types.CopyCommand{ CopyOptions: aws.String("GZIP"), }, - S3BackupMode: aws.String("Enabled"), - ProcessingConfiguration: &firehose.ProcessingConfiguration{ + S3BackupMode: types.RedshiftS3BackupModeEnabled, + ProcessingConfiguration: &types.ProcessingConfiguration{ Enabled: aws.Bool(true), - Processors: []*firehose.Processor{ + Processors: []types.Processor{ { - Type: aws.String("Lambda"), - Parameters: []*firehose.ProcessorParameter{ + Type: types.ProcessorTypeLambda, + Parameters: []types.ProcessorParameter{ { - ParameterName: aws.String("LambdaArn"), + ParameterName: types.ProcessorParameterNameLambdaArn, ParameterValue: aws.String("valueNotTested"), }, }, @@ -940,7 +974,7 @@ func TestAccFirehoseDeliveryStream_redshiftUpdates(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.FirehoseEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDeliveryStreamDestroy(ctx), Steps: []resource.TestStep{ @@ -970,22 +1004,26 @@ func TestAccFirehoseDeliveryStream_redshiftUpdates(t *testing.T) { func TestAccFirehoseDeliveryStream_splunkUpdates(t *testing.T) { ctx := acctest.Context(t) - var stream firehose.DeliveryStreamDescription + var stream types.DeliveryStreamDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" - updatedSplunkConfig := &firehose.SplunkDestinationDescription{ - HECEndpointType: aws.String("Event"), - HECAcknowledgmentTimeoutInSeconds: aws.Int64(600), - S3BackupMode: aws.String("FailedEventsOnly"), - ProcessingConfiguration: &firehose.ProcessingConfiguration{ + updatedSplunkConfig := &types.SplunkDestinationDescription{ + BufferingHints: &types.SplunkBufferingHints{ + IntervalInSeconds: aws.Int32(45), + SizeInMBs: aws.Int32(3), + }, + HECEndpointType: types.HECEndpointTypeEvent, + HECAcknowledgmentTimeoutInSeconds: aws.Int32(600), + S3BackupMode: types.SplunkS3BackupModeFailedEventsOnly, + ProcessingConfiguration: &types.ProcessingConfiguration{ Enabled: aws.Bool(true), - Processors: []*firehose.Processor{ + Processors: []types.Processor{ { - Type: aws.String("Lambda"), - Parameters: []*firehose.ProcessorParameter{ + Type: types.ProcessorTypeLambda, + Parameters: []types.ProcessorParameter{ { - ParameterName: aws.String("LambdaArn"), + ParameterName: types.ProcessorParameterNameLambdaArn, ParameterValue: 
aws.String("valueNotTested"), }, }, @@ -996,7 +1034,7 @@ func TestAccFirehoseDeliveryStream_splunkUpdates(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.FirehoseEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDeliveryStreamDestroy(ctx), Steps: []resource.TestStep{ @@ -1025,13 +1063,13 @@ func TestAccFirehoseDeliveryStream_splunkUpdates(t *testing.T) { func TestAccFirehoseDeliveryStream_Splunk_ErrorOutputPrefix(t *testing.T) { ctx := acctest.Context(t) - var stream firehose.DeliveryStreamDescription + var stream types.DeliveryStreamDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.FirehoseEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDeliveryStreamDestroy_ExtendedS3(ctx), Steps: []resource.TestStep{ @@ -1075,24 +1113,24 @@ func TestAccFirehoseDeliveryStream_Splunk_ErrorOutputPrefix(t *testing.T) { func TestAccFirehoseDeliveryStream_httpEndpoint(t *testing.T) { ctx := acctest.Context(t) - var stream firehose.DeliveryStreamDescription + var stream types.DeliveryStreamDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" - updatedHTTPEndpointConfig := &firehose.HttpEndpointDestinationDescription{ - EndpointConfiguration: &firehose.HttpEndpointDescription{ + updatedHTTPEndpointConfig := &types.HttpEndpointDestinationDescription{ + EndpointConfiguration: &types.HttpEndpointDescription{ Url: aws.String("https://input-test.com:443"), Name: aws.String("HTTP_test"), }, - S3BackupMode: aws.String("FailedDataOnly"), - ProcessingConfiguration: &firehose.ProcessingConfiguration{ + S3BackupMode: types.HttpEndpointS3BackupModeFailedDataOnly, + ProcessingConfiguration: &types.ProcessingConfiguration{ Enabled: aws.Bool(true), - Processors: []*firehose.Processor{ + Processors: []types.Processor{ { - Type: aws.String("Lambda"), - Parameters: []*firehose.ProcessorParameter{ + Type: types.ProcessorTypeLambda, + Parameters: []types.ProcessorParameter{ { - ParameterName: aws.String("LambdaArn"), + ParameterName: types.ProcessorParameterNameLambdaArn, ParameterValue: aws.String("valueNotTested"), }, }, @@ -1103,7 +1141,7 @@ func TestAccFirehoseDeliveryStream_httpEndpoint(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.FirehoseEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDeliveryStreamDestroy(ctx), Steps: []resource.TestStep{ @@ -1132,13 +1170,13 @@ func TestAccFirehoseDeliveryStream_httpEndpoint(t *testing.T) { func TestAccFirehoseDeliveryStream_HTTPEndpoint_ErrorOutputPrefix(t *testing.T) { ctx := acctest.Context(t) - var stream firehose.DeliveryStreamDescription + var stream types.DeliveryStreamDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { 
acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.FirehoseEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDeliveryStreamDestroy_ExtendedS3(ctx), Steps: []resource.TestStep{ @@ -1182,13 +1220,13 @@ func TestAccFirehoseDeliveryStream_HTTPEndpoint_ErrorOutputPrefix(t *testing.T) func TestAccFirehoseDeliveryStream_HTTPEndpoint_retryDuration(t *testing.T) { ctx := acctest.Context(t) - var stream firehose.DeliveryStreamDescription + var stream types.DeliveryStreamDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.FirehoseEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDeliveryStreamDestroy(ctx), Steps: []resource.TestStep{ @@ -1215,22 +1253,22 @@ func TestAccFirehoseDeliveryStream_HTTPEndpoint_retryDuration(t *testing.T) { func TestAccFirehoseDeliveryStream_elasticSearchUpdates(t *testing.T) { ctx := acctest.Context(t) - var stream firehose.DeliveryStreamDescription + var stream types.DeliveryStreamDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" - updatedElasticsearchConfig := &firehose.ElasticsearchDestinationDescription{ - BufferingHints: &firehose.ElasticsearchBufferingHints{ - IntervalInSeconds: aws.Int64(500), + updatedElasticsearchConfig := &types.ElasticsearchDestinationDescription{ + BufferingHints: &types.ElasticsearchBufferingHints{ + IntervalInSeconds: aws.Int32(500), }, - ProcessingConfiguration: &firehose.ProcessingConfiguration{ + ProcessingConfiguration: &types.ProcessingConfiguration{ Enabled: aws.Bool(true), - Processors: []*firehose.Processor{ + Processors: []types.Processor{ { - Type: aws.String("Lambda"), - Parameters: []*firehose.ProcessorParameter{ + Type: types.ProcessorTypeLambda, + Parameters: []types.ProcessorParameter{ { - ParameterName: aws.String("LambdaArn"), + ParameterName: types.ProcessorParameterNameLambdaArn, ParameterValue: aws.String("valueNotTested"), }, }, @@ -1241,7 +1279,7 @@ func TestAccFirehoseDeliveryStream_elasticSearchUpdates(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckIAMServiceLinkedRole(ctx, t, "/aws-service-role/es") }, - ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.FirehoseEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDeliveryStreamDestroy(ctx), Steps: []resource.TestStep{ @@ -1270,22 +1308,22 @@ func TestAccFirehoseDeliveryStream_elasticSearchUpdates(t *testing.T) { func TestAccFirehoseDeliveryStream_elasticSearchEndpointUpdates(t *testing.T) { ctx := acctest.Context(t) - var stream firehose.DeliveryStreamDescription + var stream types.DeliveryStreamDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" - updatedElasticsearchConfig := &firehose.ElasticsearchDestinationDescription{ - BufferingHints: &firehose.ElasticsearchBufferingHints{ - IntervalInSeconds: aws.Int64(500), + updatedElasticsearchConfig := &types.ElasticsearchDestinationDescription{ + 
BufferingHints: &types.ElasticsearchBufferingHints{ + IntervalInSeconds: aws.Int32(500), }, - ProcessingConfiguration: &firehose.ProcessingConfiguration{ + ProcessingConfiguration: &types.ProcessingConfiguration{ Enabled: aws.Bool(true), - Processors: []*firehose.Processor{ + Processors: []types.Processor{ { - Type: aws.String("Lambda"), - Parameters: []*firehose.ProcessorParameter{ + Type: types.ProcessorTypeLambda, + Parameters: []types.ProcessorParameter{ { - ParameterName: aws.String("LambdaArn"), + ParameterName: types.ProcessorParameterNameLambdaArn, ParameterValue: aws.String("valueNotTested"), }, }, @@ -1296,7 +1334,7 @@ func TestAccFirehoseDeliveryStream_elasticSearchEndpointUpdates(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckIAMServiceLinkedRole(ctx, t, "/aws-service-role/es") }, - ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.FirehoseEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDeliveryStreamDestroy(ctx), Steps: []resource.TestStep{ @@ -1327,22 +1365,22 @@ func TestAccFirehoseDeliveryStream_elasticSearchEndpointUpdates(t *testing.T) { // when the Kinesis Firehose delivery stream has a VPC Configuration. func TestAccFirehoseDeliveryStream_elasticSearchWithVPCUpdates(t *testing.T) { ctx := acctest.Context(t) - var stream firehose.DeliveryStreamDescription + var stream types.DeliveryStreamDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" - updatedElasticsearchConfig := &firehose.ElasticsearchDestinationDescription{ - BufferingHints: &firehose.ElasticsearchBufferingHints{ - IntervalInSeconds: aws.Int64(500), + updatedElasticsearchConfig := &types.ElasticsearchDestinationDescription{ + BufferingHints: &types.ElasticsearchBufferingHints{ + IntervalInSeconds: aws.Int32(500), }, - ProcessingConfiguration: &firehose.ProcessingConfiguration{ + ProcessingConfiguration: &types.ProcessingConfiguration{ Enabled: aws.Bool(true), - Processors: []*firehose.Processor{ + Processors: []types.Processor{ { - Type: aws.String("Lambda"), - Parameters: []*firehose.ProcessorParameter{ + Type: types.ProcessorTypeLambda, + Parameters: []types.ProcessorParameter{ { - ParameterName: aws.String("LambdaArn"), + ParameterName: types.ProcessorParameterNameLambdaArn, ParameterValue: aws.String("valueNotTested"), }, }, @@ -1353,7 +1391,7 @@ func TestAccFirehoseDeliveryStream_elasticSearchWithVPCUpdates(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckIAMServiceLinkedRole(ctx, t, "/aws-service-role/es") }, - ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.FirehoseEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDeliveryStreamDestroy(ctx), Steps: []resource.TestStep{ @@ -1390,13 +1428,13 @@ func TestAccFirehoseDeliveryStream_elasticSearchWithVPCUpdates(t *testing.T) { func TestAccFirehoseDeliveryStream_Elasticsearch_ErrorOutputPrefix(t *testing.T) { ctx := acctest.Context(t) - var stream firehose.DeliveryStreamDescription + var stream types.DeliveryStreamDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); 
acctest.PreCheckIAMServiceLinkedRole(ctx, t, "/aws-service-role/es") }, - ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.FirehoseEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDeliveryStreamDestroy_ExtendedS3(ctx), Steps: []resource.TestStep{ @@ -1440,22 +1478,22 @@ func TestAccFirehoseDeliveryStream_Elasticsearch_ErrorOutputPrefix(t *testing.T) func TestAccFirehoseDeliveryStream_openSearchUpdates(t *testing.T) { ctx := acctest.Context(t) - var stream firehose.DeliveryStreamDescription + var stream types.DeliveryStreamDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" - updatedOpensearchConfig := &firehose.AmazonopensearchserviceDestinationDescription{ - BufferingHints: &firehose.AmazonopensearchserviceBufferingHints{ - IntervalInSeconds: aws.Int64(500), + updatedOpensearchConfig := &types.AmazonopensearchserviceDestinationDescription{ + BufferingHints: &types.AmazonopensearchserviceBufferingHints{ + IntervalInSeconds: aws.Int32(500), }, - ProcessingConfiguration: &firehose.ProcessingConfiguration{ + ProcessingConfiguration: &types.ProcessingConfiguration{ Enabled: aws.Bool(true), - Processors: []*firehose.Processor{ + Processors: []types.Processor{ { - Type: aws.String("Lambda"), - Parameters: []*firehose.ProcessorParameter{ + Type: types.ProcessorTypeLambda, + Parameters: []types.ProcessorParameter{ { - ParameterName: aws.String("LambdaArn"), + ParameterName: types.ProcessorParameterNameLambdaArn, ParameterValue: aws.String("valueNotTested"), }, }, @@ -1469,7 +1507,7 @@ func TestAccFirehoseDeliveryStream_openSearchUpdates(t *testing.T) { acctest.PreCheck(ctx, t) acctest.PreCheckIAMServiceLinkedRole(ctx, t, "/aws-service-role/opensearchservice") }, - ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.FirehoseEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDeliveryStreamDestroy(ctx), Steps: []resource.TestStep{ @@ -1478,6 +1516,8 @@ func TestAccFirehoseDeliveryStream_openSearchUpdates(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckDeliveryStreamExists(ctx, resourceName, &stream), testAccCheckDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil, nil, nil), + resource.TestCheckResourceAttr(resourceName, "opensearch_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "opensearch_configuration.0.document_id_options.#", "0"), ), }, { @@ -1498,22 +1538,22 @@ func TestAccFirehoseDeliveryStream_openSearchUpdates(t *testing.T) { func TestAccFirehoseDeliveryStream_openSearchEndpointUpdates(t *testing.T) { ctx := acctest.Context(t) - var stream firehose.DeliveryStreamDescription + var stream types.DeliveryStreamDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" - updatedOpensearchConfig := &firehose.AmazonopensearchserviceDestinationDescription{ - BufferingHints: &firehose.AmazonopensearchserviceBufferingHints{ - IntervalInSeconds: aws.Int64(500), + updatedOpensearchConfig := &types.AmazonopensearchserviceDestinationDescription{ + BufferingHints: &types.AmazonopensearchserviceBufferingHints{ + IntervalInSeconds: aws.Int32(500), }, - ProcessingConfiguration: &firehose.ProcessingConfiguration{ + ProcessingConfiguration: &types.ProcessingConfiguration{ Enabled: aws.Bool(true), - Processors: 
[]*firehose.Processor{ + Processors: []types.Processor{ { - Type: aws.String("Lambda"), - Parameters: []*firehose.ProcessorParameter{ + Type: types.ProcessorTypeLambda, + Parameters: []types.ProcessorParameter{ { - ParameterName: aws.String("LambdaArn"), + ParameterName: types.ProcessorParameterNameLambdaArn, ParameterValue: aws.String("valueNotTested"), }, }, @@ -1527,7 +1567,7 @@ func TestAccFirehoseDeliveryStream_openSearchEndpointUpdates(t *testing.T) { acctest.PreCheck(ctx, t) acctest.PreCheckIAMServiceLinkedRole(ctx, t, "/aws-service-role/opensearchservice") }, - ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.FirehoseEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDeliveryStreamDestroy(ctx), Steps: []resource.TestStep{ @@ -1558,22 +1598,22 @@ func TestAccFirehoseDeliveryStream_openSearchEndpointUpdates(t *testing.T) { // when the Kinesis Firehose delivery stream has a VPC Configuration. func TestAccFirehoseDeliveryStream_openSearchWithVPCUpdates(t *testing.T) { ctx := acctest.Context(t) - var stream firehose.DeliveryStreamDescription + var stream types.DeliveryStreamDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" - updatedOpensearchConfig := &firehose.AmazonopensearchserviceDestinationDescription{ - BufferingHints: &firehose.AmazonopensearchserviceBufferingHints{ - IntervalInSeconds: aws.Int64(500), + updatedOpensearchConfig := &types.AmazonopensearchserviceDestinationDescription{ + BufferingHints: &types.AmazonopensearchserviceBufferingHints{ + IntervalInSeconds: aws.Int32(500), }, - ProcessingConfiguration: &firehose.ProcessingConfiguration{ + ProcessingConfiguration: &types.ProcessingConfiguration{ Enabled: aws.Bool(true), - Processors: []*firehose.Processor{ + Processors: []types.Processor{ { - Type: aws.String("Lambda"), - Parameters: []*firehose.ProcessorParameter{ + Type: types.ProcessorTypeLambda, + Parameters: []types.ProcessorParameter{ { - ParameterName: aws.String("LambdaArn"), + ParameterName: types.ProcessorParameterNameLambdaArn, ParameterValue: aws.String("valueNotTested"), }, }, @@ -1587,7 +1627,7 @@ func TestAccFirehoseDeliveryStream_openSearchWithVPCUpdates(t *testing.T) { acctest.PreCheck(ctx, t) acctest.PreCheckIAMServiceLinkedRole(ctx, t, "/aws-service-role/opensearchservice") }, - ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.FirehoseEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDeliveryStreamDestroy(ctx), Steps: []resource.TestStep{ @@ -1624,7 +1664,7 @@ func TestAccFirehoseDeliveryStream_openSearchWithVPCUpdates(t *testing.T) { func TestAccFirehoseDeliveryStream_Opensearch_ErrorOutputPrefix(t *testing.T) { ctx := acctest.Context(t) - var stream firehose.DeliveryStreamDescription + var stream types.DeliveryStreamDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" @@ -1633,7 +1673,7 @@ func TestAccFirehoseDeliveryStream_Opensearch_ErrorOutputPrefix(t *testing.T) { acctest.PreCheck(ctx, t) acctest.PreCheckIAMServiceLinkedRole(ctx, t, "/aws-service-role/opensearchservice") }, - ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.FirehoseEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: 
testAccCheckDeliveryStreamDestroy_ExtendedS3(ctx), Steps: []resource.TestStep{ @@ -1677,7 +1717,7 @@ func TestAccFirehoseDeliveryStream_Opensearch_ErrorOutputPrefix(t *testing.T) { func TestAccFirehoseDeliveryStream_openSearchServerlessUpdates(t *testing.T) { ctx := acctest.Context(t) - var stream firehose.DeliveryStreamDescription + var stream types.DeliveryStreamDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" @@ -1686,7 +1726,7 @@ func TestAccFirehoseDeliveryStream_openSearchServerlessUpdates(t *testing.T) { acctest.PreCheck(ctx, t) acctest.PreCheckPartitionHasService(t, names.OpenSearchServerlessEndpointID) }, - ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.FirehoseEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDeliveryStreamDestroy(ctx), Steps: []resource.TestStep{ @@ -1778,7 +1818,7 @@ func TestAccFirehoseDeliveryStream_openSearchServerlessUpdates(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "opensearchserverless_configuration.0.processing_configuration.0.processors.0.parameters.0.parameter_name", "LambdaArn"), resource.TestCheckResourceAttrSet(resourceName, "opensearchserverless_configuration.0.processing_configuration.0.processors.0.parameters.0.parameter_value"), resource.TestCheckResourceAttr(resourceName, "opensearchserverless_configuration.0.processing_configuration.0.processors.0.parameters.1.parameter_name", "BufferSizeInMBs"), - resource.TestCheckResourceAttr(resourceName, "opensearchserverless_configuration.0.processing_configuration.0.processors.0.parameters.1.parameter_value", "1"), + resource.TestCheckResourceAttr(resourceName, "opensearchserverless_configuration.0.processing_configuration.0.processors.0.parameters.1.parameter_value", "1.1"), resource.TestCheckResourceAttr(resourceName, "opensearchserverless_configuration.0.processing_configuration.0.processors.0.parameters.2.parameter_name", "BufferIntervalInSeconds"), resource.TestCheckResourceAttr(resourceName, "opensearchserverless_configuration.0.processing_configuration.0.processors.0.parameters.2.parameter_value", "70"), resource.TestCheckResourceAttr(resourceName, "opensearchserverless_configuration.0.retry_duration", "300"), @@ -1815,13 +1855,13 @@ func TestAccFirehoseDeliveryStream_openSearchServerlessUpdates(t *testing.T) { // Regression test for https://github.com/hashicorp/terraform-provider-aws/issues/1657 func TestAccFirehoseDeliveryStream_missingProcessing(t *testing.T) { ctx := acctest.Context(t) - var stream firehose.DeliveryStreamDescription + var stream types.DeliveryStreamDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_kinesis_firehose_delivery_stream.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, firehose.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.FirehoseEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDeliveryStreamDestroy(ctx), Steps: []resource.TestStep{ @@ -1841,18 +1881,14 @@ func TestAccFirehoseDeliveryStream_missingProcessing(t *testing.T) { }) } -func testAccCheckDeliveryStreamExists(ctx context.Context, n string, v *firehose.DeliveryStreamDescription) resource.TestCheckFunc { +func testAccCheckDeliveryStreamExists(ctx context.Context, n string, v *types.DeliveryStreamDescription) 
resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No Kinesis Firehose Delivery Stream ID is set") - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).FirehoseConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).FirehoseClient(ctx) output, err := tffirehose.FindDeliveryStreamByName(ctx, conn, rs.Primary.Attributes["name"]) @@ -1873,7 +1909,7 @@ func testAccCheckDeliveryStreamDestroy(ctx context.Context) resource.TestCheckFu continue } - conn := acctest.Provider.Meta().(*conns.AWSClient).FirehoseConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).FirehoseClient(ctx) _, err := tffirehose.FindDeliveryStreamByName(ctx, conn, rs.Primary.Attributes["name"]) @@ -1892,7 +1928,7 @@ func testAccCheckDeliveryStreamDestroy(ctx context.Context) resource.TestCheckFu } } -func testAccCheckDeliveryStreamAttributes(stream *firehose.DeliveryStreamDescription, s3config interface{}, extendedS3config interface{}, redshiftConfig interface{}, elasticsearchConfig interface{}, opensearchConfig interface{}, splunkConfig interface{}, httpEndpointConfig interface{}) resource.TestCheckFunc { +func testAccCheckDeliveryStreamAttributes(stream *types.DeliveryStreamDescription, s3config interface{}, extendedS3config interface{}, redshiftConfig interface{}, elasticsearchConfig interface{}, opensearchConfig interface{}, splunkConfig interface{}, httpEndpointConfig interface{}) resource.TestCheckFunc { return func(s *terraform.State) error { if !strings.HasPrefix(*stream.DeliveryStreamName, "terraform-kinesis-firehose") && !strings.HasPrefix(*stream.DeliveryStreamName, acctest.ResourcePrefix) { return fmt.Errorf("Bad Stream name: %s", *stream.DeliveryStreamName) @@ -1906,7 +1942,7 @@ func testAccCheckDeliveryStreamAttributes(stream *firehose.DeliveryStreamDescrip } if s3config != nil { - s := s3config.(*firehose.S3DestinationDescription) + s := s3config.(*types.S3DestinationDescription) // Range over the Stream Destinations, looking for the matching S3 // destination. For simplicity, our test only have a single S3 or // Redshift destination, so at this time it's safe to match on the first @@ -1920,12 +1956,12 @@ func testAccCheckDeliveryStreamAttributes(stream *firehose.DeliveryStreamDescrip } } if !match { - return fmt.Errorf("Mismatch s3 buffer size, expected: %s, got: %s", s, stream.Destinations) + return fmt.Errorf("Mismatch s3 buffer size, expected: %v, got: %v", s, stream.Destinations) } } if extendedS3config != nil { - es := extendedS3config.(*firehose.ExtendedS3DestinationDescription) + es := extendedS3config.(*types.ExtendedS3DestinationDescription) // Range over the Stream Destinations, looking for the matching S3 // destination. 
For simplicity, our test only have a single S3 or @@ -1937,7 +1973,7 @@ func testAccCheckDeliveryStreamAttributes(stream *firehose.DeliveryStreamDescrip if *d.ExtendedS3DestinationDescription.BufferingHints.SizeInMBs == *es.BufferingHints.SizeInMBs { match = true } - if *d.ExtendedS3DestinationDescription.S3BackupMode == *es.S3BackupMode { + if d.ExtendedS3DestinationDescription.S3BackupMode == es.S3BackupMode { matchS3BackupMode = true } @@ -1945,18 +1981,18 @@ func testAccCheckDeliveryStreamAttributes(stream *firehose.DeliveryStreamDescrip } } if !match { - return fmt.Errorf("Mismatch extended s3 buffer size, expected: %s, got: %s", es, stream.Destinations) + return fmt.Errorf("Mismatch extended s3 buffer size, expected: %v, got: %v", es, stream.Destinations) } if !processingConfigMatch { - return fmt.Errorf("Mismatch extended s3 ProcessingConfiguration.Processors count, expected: %s, got: %s", es, stream.Destinations) + return fmt.Errorf("Mismatch extended s3 ProcessingConfiguration.Processors count, expected: %v, got: %v", es, stream.Destinations) } if !matchS3BackupMode { - return fmt.Errorf("Mismatch extended s3 S3BackupMode, expected: %s, got: %s", es, stream.Destinations) + return fmt.Errorf("Mismatch extended s3 S3BackupMode, expected: %v, got: %v", es, stream.Destinations) } } if redshiftConfig != nil { - r := redshiftConfig.(*firehose.RedshiftDestinationDescription) + r := redshiftConfig.(*types.RedshiftDestinationDescription) // Range over the Stream Destinations, looking for the matching Redshift // destination var matchCopyOptions, matchS3BackupMode, processingConfigMatch bool @@ -1965,7 +2001,7 @@ func testAccCheckDeliveryStreamAttributes(stream *firehose.DeliveryStreamDescrip if *d.RedshiftDestinationDescription.CopyCommand.CopyOptions == *r.CopyCommand.CopyOptions { matchCopyOptions = true } - if *d.RedshiftDestinationDescription.S3BackupMode == *r.S3BackupMode { + if d.RedshiftDestinationDescription.S3BackupMode == r.S3BackupMode { matchS3BackupMode = true } if r.ProcessingConfiguration != nil && d.RedshiftDestinationDescription.ProcessingConfiguration != nil { @@ -1974,15 +2010,15 @@ func testAccCheckDeliveryStreamAttributes(stream *firehose.DeliveryStreamDescrip } } if !matchCopyOptions || !matchS3BackupMode { - return fmt.Errorf("Mismatch Redshift CopyOptions or S3BackupMode, expected: %s, got: %s", r, stream.Destinations) + return fmt.Errorf("Mismatch Redshift CopyOptions or S3BackupMode, expected: %v, got: %v", r, stream.Destinations) } if !processingConfigMatch { - return fmt.Errorf("Mismatch Redshift ProcessingConfiguration.Processors count, expected: %s, got: %s", r, stream.Destinations) + return fmt.Errorf("Mismatch Redshift ProcessingConfiguration.Processors count, expected: %v, got: %v", r, stream.Destinations) } } if elasticsearchConfig != nil { - es := elasticsearchConfig.(*firehose.ElasticsearchDestinationDescription) + es := elasticsearchConfig.(*types.ElasticsearchDestinationDescription) // Range over the Stream Destinations, looking for the matching Elasticsearch destination var match, processingConfigMatch bool for _, d := range stream.Destinations { @@ -1994,15 +2030,15 @@ func testAccCheckDeliveryStreamAttributes(stream *firehose.DeliveryStreamDescrip } } if !match { - return fmt.Errorf("Mismatch Elasticsearch Buffering Interval, expected: %s, got: %s", es, stream.Destinations) + return fmt.Errorf("Mismatch Elasticsearch Buffering Interval, expected: %v, got: %v", es, stream.Destinations) } if !processingConfigMatch { - return fmt.Errorf("Mismatch 
Elasticsearch ProcessingConfiguration.Processors count, expected: %s, got: %s", es, stream.Destinations) + return fmt.Errorf("Mismatch Elasticsearch ProcessingConfiguration.Processors count, expected: %v, got: %v", es, stream.Destinations) } } if opensearchConfig != nil { - es := opensearchConfig.(*firehose.AmazonopensearchserviceDestinationDescription) + es := opensearchConfig.(*types.AmazonopensearchserviceDestinationDescription) // Range over the Stream Destinations, looking for the matching Opensearch destination var match, processingConfigMatch bool for _, d := range stream.Destinations { @@ -2014,26 +2050,29 @@ func testAccCheckDeliveryStreamAttributes(stream *firehose.DeliveryStreamDescrip } } if !match { - return fmt.Errorf("Mismatch Opensearch Buffering Interval, expected: %s, got: %s", es, stream.Destinations) + return fmt.Errorf("Mismatch Opensearch Buffering Interval, expected: %v, got: %v", es, stream.Destinations) } if !processingConfigMatch { - return fmt.Errorf("Mismatch Opensearch ProcessingConfiguration.Processors count, expected: %s, got: %s", es, stream.Destinations) + return fmt.Errorf("Mismatch Opensearch ProcessingConfiguration.Processors count, expected: %v, got: %v", es, stream.Destinations) } } if splunkConfig != nil { - s := splunkConfig.(*firehose.SplunkDestinationDescription) + s := splunkConfig.(*types.SplunkDestinationDescription) // Range over the Stream Destinations, looking for the matching Splunk destination - var matchHECEndpointType, matchHECAcknowledgmentTimeoutInSeconds, matchS3BackupMode, processingConfigMatch bool + var match, matchHECEndpointType, matchHECAcknowledgmentTimeoutInSeconds, matchS3BackupMode, processingConfigMatch bool for _, d := range stream.Destinations { if d.SplunkDestinationDescription != nil { - if *d.SplunkDestinationDescription.HECEndpointType == *s.HECEndpointType { + if *d.SplunkDestinationDescription.BufferingHints.SizeInMBs == *s.BufferingHints.SizeInMBs { + match = true + } + if d.SplunkDestinationDescription.HECEndpointType == s.HECEndpointType { matchHECEndpointType = true } if *d.SplunkDestinationDescription.HECAcknowledgmentTimeoutInSeconds == *s.HECAcknowledgmentTimeoutInSeconds { matchHECAcknowledgmentTimeoutInSeconds = true } - if *d.SplunkDestinationDescription.S3BackupMode == *s.S3BackupMode { + if d.SplunkDestinationDescription.S3BackupMode == s.S3BackupMode { matchS3BackupMode = true } if s.ProcessingConfiguration != nil && d.SplunkDestinationDescription.ProcessingConfiguration != nil { @@ -2041,21 +2080,21 @@ func testAccCheckDeliveryStreamAttributes(stream *firehose.DeliveryStreamDescrip } } } - if !matchHECEndpointType || !matchHECAcknowledgmentTimeoutInSeconds || !matchS3BackupMode { - return fmt.Errorf("Mismatch Splunk HECEndpointType or HECAcknowledgmentTimeoutInSeconds or S3BackupMode, expected: %s, got: %s", s, stream.Destinations) + if !match || !matchHECEndpointType || !matchHECAcknowledgmentTimeoutInSeconds || !matchS3BackupMode { + return fmt.Errorf("Mismatch Splunk BufferingHints.SizeInMBs or HECEndpointType or HECAcknowledgmentTimeoutInSeconds or S3BackupMode, expected: %v, got: %v", s, stream.Destinations) } if !processingConfigMatch { - return fmt.Errorf("Mismatch extended splunk ProcessingConfiguration.Processors count, expected: %s, got: %s", s, stream.Destinations) + return fmt.Errorf("Mismatch extended splunk ProcessingConfiguration.Processors count, expected: %v, got: %v", s, stream.Destinations) } } if httpEndpointConfig != nil { - s := 
httpEndpointConfig.(*firehose.HttpEndpointDestinationDescription) + s := httpEndpointConfig.(*types.HttpEndpointDestinationDescription) // Range over the Stream Destinations, looking for the matching HttpEndpoint destination var matchS3BackupMode, matchUrl, matchName, processingConfigMatch bool for _, d := range stream.Destinations { if d.HttpEndpointDestinationDescription != nil { - if *d.HttpEndpointDestinationDescription.S3BackupMode == *s.S3BackupMode { + if d.HttpEndpointDestinationDescription.S3BackupMode == s.S3BackupMode { matchS3BackupMode = true } if *d.HttpEndpointDestinationDescription.EndpointConfiguration.Url == *s.EndpointConfiguration.Url { @@ -2070,13 +2109,13 @@ func testAccCheckDeliveryStreamAttributes(stream *firehose.DeliveryStreamDescrip } } if !matchS3BackupMode { - return fmt.Errorf("Mismatch HTTP Endpoint S3BackupMode, expected: %s, got: %s", s, stream.Destinations) + return fmt.Errorf("Mismatch HTTP Endpoint S3BackupMode, expected: %v, got: %v", s, stream.Destinations) } if !matchUrl || !matchName { - return fmt.Errorf("Mismatch HTTP Endpoint EndpointConfiguration, expected: %s, got: %s", s, stream.Destinations) + return fmt.Errorf("Mismatch HTTP Endpoint EndpointConfiguration, expected: %v, got: %v", s, stream.Destinations) } if !processingConfigMatch { - return fmt.Errorf("Mismatch HTTP Endpoint ProcessingConfiguration.Processors count, expected: %s, got: %s", s, stream.Destinations) + return fmt.Errorf("Mismatch HTTP Endpoint ProcessingConfiguration.Processors count, expected: %v, got: %v", s, stream.Destinations) } } } @@ -2098,14 +2137,14 @@ func testAccCheckDeliveryStreamDestroy_ExtendedS3(ctx context.Context) resource. func testAccCheckLambdaFunctionDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).LambdaConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).LambdaClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_lambda_function" { continue } - _, err := conn.GetFunctionWithContext(ctx, &lambda.GetFunctionInput{ + _, err := conn.GetFunction(ctx, &lambda.GetFunctionInput{ FunctionName: aws.String(rs.Primary.ID), }) @@ -2603,7 +2642,7 @@ resource "aws_kinesis_firehose_delivery_stream" "test" { } parameters { parameter_name = "BufferSizeInMBs" - parameter_value = "1" + parameter_value = "1.1" } parameters { parameter_name = "BufferIntervalInSeconds" @@ -3095,7 +3134,7 @@ resource "aws_kinesis_firehose_delivery_stream" "test" { } parameters { parameter_name = "BufferSizeInMBs" - parameter_value = "1" + parameter_value = "1.1" } parameters { parameter_name = "BufferIntervalInSeconds" @@ -3142,7 +3181,7 @@ resource "aws_kinesis_firehose_delivery_stream" "test" { } parameters { parameter_name = "BufferSizeInMBs" - parameter_value = "1" + parameter_value = "1.1" } parameters { parameter_name = "BufferIntervalInSeconds" @@ -3210,7 +3249,7 @@ resource "aws_kinesis_firehose_delivery_stream" "test" { } parameters { parameter_name = "BufferSizeInMBs" - parameter_value = "1" + parameter_value = "1.1" } parameters { parameter_name = "BufferIntervalInSeconds" @@ -3233,6 +3272,51 @@ resource "aws_kinesis_firehose_delivery_stream" "test" { `, rName)) } +func testAccDeliveryStreamConfig_extendedS3UpdatesSetBufferIntervalNoBufferSize(rName string) string { + return acctest.ConfigCompose( + testAccDeliveryStreamConfig_baseLambda(rName), + testAccDeliveryStreamConfig_base(rName), + fmt.Sprintf(` +resource "aws_kinesis_firehose_delivery_stream" 
"test" { + depends_on = [aws_iam_role_policy.firehose] + name = %[1]q + destination = "extended_s3" + + extended_s3_configuration { + role_arn = aws_iam_role.firehose.arn + bucket_arn = aws_s3_bucket.bucket.arn + + processing_configuration { + enabled = true + + processors { + type = "Lambda" + + parameters { + parameter_name = "LambdaArn" + parameter_value = "${aws_lambda_function.lambda_function_test.arn}:$LATEST" + } + parameters { + parameter_name = "BufferIntervalInSeconds" + parameter_value = 201 + } + } + } + + buffering_size = 10 + buffering_interval = 400 + compression_format = "GZIP" + s3_backup_mode = "Enabled" + + s3_backup_configuration { + role_arn = aws_iam_role.firehose.arn + bucket_arn = aws_s3_bucket.bucket.arn + } + } +} +`, rName)) +} + func testAccDeliveryStreamConfig_extendedS3UpdatesRemoveProcessors(rName string) string { return acctest.ConfigCompose( testAccDeliveryStreamConfig_baseLambda(rName), @@ -3354,7 +3438,7 @@ resource "aws_kinesis_firehose_delivery_stream" "test" { } parameters { parameter_name = "BufferSizeInMBs" - parameter_value = "1" + parameter_value = "1.1" } parameters { parameter_name = "BufferIntervalInSeconds" @@ -3398,6 +3482,8 @@ resource "aws_kinesis_firehose_delivery_stream" "test" { destination = "splunk" splunk_configuration { + buffering_interval = 45 + buffering_size = 3 hec_endpoint = "https://input-test.com:443" hec_token = "51D4DA16-C61B-4F5F-8EC7-ED4301342A4A" hec_acknowledgment_timeout = 600 @@ -3430,12 +3516,12 @@ resource "aws_kinesis_firehose_delivery_stream" "test" { parameters { parameter_name = "BufferSizeInMBs" - parameter_value = 1 + parameter_value = "1.1" } parameters { parameter_name = "BufferIntervalInSeconds" - parameter_value = 120 + parameter_value = "70" } } } @@ -3568,12 +3654,12 @@ resource "aws_kinesis_firehose_delivery_stream" "test" { parameters { parameter_name = "BufferSizeInMBs" - parameter_value = 1 + parameter_value = "1.1" } parameters { parameter_name = "BufferIntervalInSeconds" - parameter_value = 120 + parameter_value = "70" } } } @@ -3809,7 +3895,7 @@ resource "aws_kinesis_firehose_delivery_stream" "test" { } parameters { parameter_name = "BufferSizeInMBs" - parameter_value = "1" + parameter_value = "1.1" } parameters { parameter_name = "BufferIntervalInSeconds" @@ -3861,7 +3947,7 @@ resource "aws_kinesis_firehose_delivery_stream" "test" { } parameters { parameter_name = "BufferSizeInMBs" - parameter_value = "1" + parameter_value = "1.1" } parameters { parameter_name = "BufferIntervalInSeconds" @@ -3930,7 +4016,7 @@ resource "aws_kinesis_firehose_delivery_stream" "test" { } parameters { parameter_name = "BufferSizeInMBs" - parameter_value = "1" + parameter_value = "1.1" } parameters { parameter_name = "BufferIntervalInSeconds" @@ -4135,7 +4221,7 @@ resource "aws_kinesis_firehose_delivery_stream" "test" { } parameters { parameter_name = "BufferSizeInMBs" - parameter_value = "1" + parameter_value = "1.1" } parameters { parameter_name = "BufferIntervalInSeconds" @@ -4232,7 +4318,7 @@ resource "aws_kinesis_firehose_delivery_stream" "test" { } parameters { parameter_name = "BufferSizeInMBs" - parameter_value = "1" + parameter_value = "1.1" } parameters { parameter_name = "BufferIntervalInSeconds" @@ -4283,7 +4369,7 @@ resource "aws_kinesis_firehose_delivery_stream" "test" { } parameters { parameter_name = "BufferSizeInMBs" - parameter_value = "1" + parameter_value = "1.1" } parameters { parameter_name = "BufferIntervalInSeconds" @@ -4396,7 +4482,7 @@ resource "aws_kinesis_firehose_delivery_stream" "test" { } 
parameters { parameter_name = "BufferSizeInMBs" - parameter_value = "1" + parameter_value = "1.1" } parameters { parameter_name = "BufferIntervalInSeconds" diff --git a/internal/service/firehose/exports_test.go b/internal/service/firehose/exports_test.go new file mode 100644 index 00000000000..9f2c60977fa --- /dev/null +++ b/internal/service/firehose/exports_test.go @@ -0,0 +1,11 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package firehose + +// Exports for use in tests only. +var ( + ResourceDeliveryStream = resourceDeliveryStream + + FindDeliveryStreamByName = findDeliveryStreamByName +) diff --git a/internal/service/firehose/find.go b/internal/service/firehose/find.go deleted file mode 100644 index 08fdad4747f..00000000000 --- a/internal/service/firehose/find.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package firehose - -import ( - "context" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/firehose" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" -) - -func FindDeliveryStreamByName(ctx context.Context, conn *firehose.Firehose, name string) (*firehose.DeliveryStreamDescription, error) { - input := &firehose.DescribeDeliveryStreamInput{ - DeliveryStreamName: aws.String(name), - } - - output, err := conn.DescribeDeliveryStreamWithContext(ctx, input) - - if tfawserr.ErrCodeEquals(err, firehose.ErrCodeResourceNotFoundException) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - if output == nil || output.DeliveryStreamDescription == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - return output.DeliveryStreamDescription, nil -} - -func FindDeliveryStreamEncryptionConfigurationByName(ctx context.Context, conn *firehose.Firehose, name string) (*firehose.DeliveryStreamEncryptionConfiguration, error) { - output, err := FindDeliveryStreamByName(ctx, conn, name) - - if err != nil { - return nil, err - } - - if output.DeliveryStreamEncryptionConfiguration == nil { - return nil, tfresource.NewEmptyResultError(nil) - } - - return output.DeliveryStreamEncryptionConfiguration, nil -} diff --git a/internal/service/firehose/generate.go b/internal/service/firehose/generate.go index 51d3a4c78ab..6c3c7a04c98 100644 --- a/internal/service/firehose/generate.go +++ b/internal/service/firehose/generate.go @@ -1,7 +1,7 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -//go:generate go run ../../generate/tags/main.go -ListTags -ListTagsOp=ListTagsForDeliveryStream -ListTagsInIDElem=DeliveryStreamName -ServiceTagsSlice -TagOp=TagDeliveryStream -TagInIDElem=DeliveryStreamName -UntagOp=UntagDeliveryStream -UpdateTags +//go:generate go run ../../generate/tags/main.go -AWSSDKVersion=2 -ListTags -ListTagsOp=ListTagsForDeliveryStream -ListTagsInIDElem=DeliveryStreamName -ServiceTagsSlice -TagOp=TagDeliveryStream -TagInIDElem=DeliveryStreamName -UntagOp=UntagDeliveryStream -UpdateTags //go:generate go run ../../generate/servicepackage/main.go // ONLY generate directives and package declaration! Do not add anything else to this file. 
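The v1 finder in `find.go` is deleted above; its SDK v2 replacement now lives in `delivery_stream.go` (re-exported for tests via `exports_test.go`) and is outside this hunk. Below is a minimal sketch of what the migrated finder looks like, assuming the provider's usual v2 conventions: `errs.IsA` for typed error matching against the v2 `types.ResourceNotFoundException`, replacing `tfawserr.ErrCodeEquals` on string error codes.

```go
package firehose

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/firehose"
	"github.com/aws/aws-sdk-go-v2/service/firehose/types"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry"
	"github.com/hashicorp/terraform-provider-aws/internal/errs"
	"github.com/hashicorp/terraform-provider-aws/internal/tfresource"
)

// findDeliveryStreamByName is a sketch of the SDK v2 counterpart of the
// deleted v1 finder; the actual implementation lives in delivery_stream.go.
func findDeliveryStreamByName(ctx context.Context, conn *firehose.Client, name string) (*types.DeliveryStreamDescription, error) {
	input := &firehose.DescribeDeliveryStreamInput{
		DeliveryStreamName: aws.String(name),
	}

	output, err := conn.DescribeDeliveryStream(ctx, input)

	// v2 surfaces concrete error types rather than string error codes.
	if errs.IsA[*types.ResourceNotFoundException](err) {
		return nil, &retry.NotFoundError{
			LastError:   err,
			LastRequest: input,
		}
	}

	if err != nil {
		return nil, err
	}

	if output == nil || output.DeliveryStreamDescription == nil {
		return nil, tfresource.NewEmptyResultError(input)
	}

	return output.DeliveryStreamDescription, nil
}
```

The shape is unchanged from the deleted v1 version; only the client type and the error-handling idiom differ between the two SDK majors.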
diff --git a/internal/service/firehose/list_pages.go b/internal/service/firehose/list_pages.go index 77d13ff106f..b9a2d43f451 100644 --- a/internal/service/firehose/list_pages.go +++ b/internal/service/firehose/list_pages.go @@ -6,25 +6,25 @@ package firehose import ( "context" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/firehose" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/firehose" ) // Custom Kinesis Firehose service lister functions using the same format as generated code. -func listDeliveryStreamsPages(ctx context.Context, conn *firehose.Firehose, input *firehose.ListDeliveryStreamsInput, fn func(*firehose.ListDeliveryStreamsOutput, bool) bool) error { +func listDeliveryStreamsPages(ctx context.Context, conn *firehose.Client, input *firehose.ListDeliveryStreamsInput, fn func(*firehose.ListDeliveryStreamsOutput, bool) bool) error { for { - output, err := conn.ListDeliveryStreamsWithContext(ctx, input) + output, err := conn.ListDeliveryStreams(ctx, input) if err != nil { return err } - lastPage := !aws.BoolValue(output.HasMoreDeliveryStreams) + lastPage := !aws.ToBool(output.HasMoreDeliveryStreams) if !fn(output, lastPage) || lastPage { break } - input.ExclusiveStartDeliveryStreamName = output.DeliveryStreamNames[len(output.DeliveryStreamNames)-1] + input.ExclusiveStartDeliveryStreamName = aws.String(output.DeliveryStreamNames[len(output.DeliveryStreamNames)-1]) } return nil } diff --git a/internal/service/firehose/service_package_gen.go b/internal/service/firehose/service_package_gen.go index 2453db4b3d6..02804443c2c 100644 --- a/internal/service/firehose/service_package_gen.go +++ b/internal/service/firehose/service_package_gen.go @@ -5,9 +5,8 @@ package firehose import ( "context" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" - firehose_sdkv1 "github.com/aws/aws-sdk-go/service/firehose" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + firehose_sdkv2 "github.com/aws/aws-sdk-go-v2/service/firehose" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -26,7 +25,7 @@ func (p *servicePackage) FrameworkResources(ctx context.Context) []*types.Servic func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePackageSDKDataSource { return []*types.ServicePackageSDKDataSource{ { - Factory: DataSourceDeliveryStream, + Factory: dataSourceDeliveryStream, TypeName: "aws_kinesis_firehose_delivery_stream", }, } @@ -35,7 +34,7 @@ func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePac func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePackageSDKResource { return []*types.ServicePackageSDKResource{ { - Factory: ResourceDeliveryStream, + Factory: resourceDeliveryStream, TypeName: "aws_kinesis_firehose_delivery_stream", Name: "Delivery Stream", Tags: &types.ServicePackageResourceTags{ @@ -49,11 +48,15 @@ func (p *servicePackage) ServicePackageName() string { return names.Firehose } -// NewConn returns a new AWS SDK for Go v1 client for this service package's AWS API. -func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*firehose_sdkv1.Firehose, error) { - sess := config["session"].(*session_sdkv1.Session) +// NewClient returns a new AWS SDK for Go v2 client for this service package's AWS API. 
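+// The "endpoint" entry carries any custom service endpoint from the provider configuration; an empty string leaves the SDK's default endpoint resolution in place.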
+func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*firehose_sdkv2.Client, error) { + cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return firehose_sdkv1.New(sess.Copy(&aws_sdkv1.Config{Endpoint: aws_sdkv1.String(config["endpoint"].(string))})), nil + return firehose_sdkv2.NewFromConfig(cfg, func(o *firehose_sdkv2.Options) { + if endpoint := config["endpoint"].(string); endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + }), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/firehose/status.go b/internal/service/firehose/status.go deleted file mode 100644 index 77f584abc57..00000000000 --- a/internal/service/firehose/status.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package firehose - -import ( - "context" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/firehose" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" -) - -func statusDeliveryStream(ctx context.Context, conn *firehose.Firehose, name string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - output, err := FindDeliveryStreamByName(ctx, conn, name) - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return output, aws.StringValue(output.DeliveryStreamStatus), nil - } -} - -func statusDeliveryStreamEncryptionConfiguration(ctx context.Context, conn *firehose.Firehose, name string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - output, err := FindDeliveryStreamEncryptionConfigurationByName(ctx, conn, name) - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return output, aws.StringValue(output.Status), nil - } -} diff --git a/internal/service/firehose/sweep.go b/internal/service/firehose/sweep.go index 63c10039464..e506f469ddd 100644 --- a/internal/service/firehose/sweep.go +++ b/internal/service/firehose/sweep.go @@ -7,12 +7,11 @@ import ( "fmt" "log" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/arn" - "github.com/aws/aws-sdk-go/service/firehose" + "github.com/aws/aws-sdk-go-v2/aws/arn" + "github.com/aws/aws-sdk-go-v2/service/firehose" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/sweep" - "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv1" + "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv2" ) func RegisterSweepers() { @@ -28,7 +27,7 @@ func sweepDeliveryStreams(region string) error { if err != nil { return fmt.Errorf("error getting client: %s", err) } - conn := client.FirehoseConn(ctx) + conn := client.FirehoseClient(ctx) input := &firehose.ListDeliveryStreamsInput{} sweepResources := make([]sweep.Sweepable, 0) @@ -38,12 +37,12 @@ func sweepDeliveryStreams(region string) error { } for _, v := range page.DeliveryStreamNames { - r := ResourceDeliveryStream() + r := resourceDeliveryStream() d := r.Data(nil) - name := aws.StringValue(v) + name := v arn := arn.ARN{ Partition: client.Partition, - Service: firehose.ServiceName, + Service: "firehose", Region: client.Region, AccountID: client.AccountID, Resource: fmt.Sprintf("deliverystream/%s", name), @@ -57,7 +56,7 @@ func sweepDeliveryStreams(region string) error { return !lastPage }) - 
if awsv1.SkipSweepError(err) { + if awsv2.SkipSweepError(err) { log.Printf("[WARN] Skipping Kinesis Firehose Delivery Stream sweep for %s: %s", region, err) return nil } diff --git a/internal/service/firehose/tags_gen.go b/internal/service/firehose/tags_gen.go index 806e3854ea3..8bf50336e30 100644 --- a/internal/service/firehose/tags_gen.go +++ b/internal/service/firehose/tags_gen.go @@ -5,9 +5,9 @@ import ( "context" "fmt" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/firehose" - "github.com/aws/aws-sdk-go/service/firehose/firehoseiface" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/firehose" + awstypes "github.com/aws/aws-sdk-go-v2/service/firehose/types" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" @@ -19,12 +19,12 @@ import ( // listTags lists firehose service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. -func listTags(ctx context.Context, conn firehoseiface.FirehoseAPI, identifier string) (tftags.KeyValueTags, error) { +func listTags(ctx context.Context, conn *firehose.Client, identifier string, optFns ...func(*firehose.Options)) (tftags.KeyValueTags, error) { input := &firehose.ListTagsForDeliveryStreamInput{ DeliveryStreamName: aws.String(identifier), } - output, err := conn.ListTagsForDeliveryStreamWithContext(ctx, input) + output, err := conn.ListTagsForDeliveryStream(ctx, input, optFns...) if err != nil { return tftags.New(ctx, nil), err @@ -36,7 +36,7 @@ func listTags(ctx context.Context, conn firehoseiface.FirehoseAPI, identifier st // ListTags lists firehose service tags and set them in Context. // It is called from outside this package. func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier string) error { - tags, err := listTags(ctx, meta.(*conns.AWSClient).FirehoseConn(ctx), identifier) + tags, err := listTags(ctx, meta.(*conns.AWSClient).FirehoseClient(ctx), identifier) if err != nil { return err @@ -52,11 +52,11 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri // []*SERVICE.Tag handling // Tags returns firehose service tags. -func Tags(tags tftags.KeyValueTags) []*firehose.Tag { - result := make([]*firehose.Tag, 0, len(tags)) +func Tags(tags tftags.KeyValueTags) []awstypes.Tag { + result := make([]awstypes.Tag, 0, len(tags)) for k, v := range tags.Map() { - tag := &firehose.Tag{ + tag := awstypes.Tag{ Key: aws.String(k), Value: aws.String(v), } @@ -68,11 +68,11 @@ func Tags(tags tftags.KeyValueTags) []*firehose.Tag { } // KeyValueTags creates tftags.KeyValueTags from firehose service tags. -func KeyValueTags(ctx context.Context, tags []*firehose.Tag) tftags.KeyValueTags { +func KeyValueTags(ctx context.Context, tags []awstypes.Tag) tftags.KeyValueTags { m := make(map[string]*string, len(tags)) for _, tag := range tags { - m[aws.StringValue(tag.Key)] = tag.Value + m[aws.ToString(tag.Key)] = tag.Value } return tftags.New(ctx, m) @@ -80,7 +80,7 @@ func KeyValueTags(ctx context.Context, tags []*firehose.Tag) tftags.KeyValueTags // getTagsIn returns firehose service tags from Context. // nil is returned if there are no input tags. 
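// (Tags reach the Context via the provider's transparent tagging interceptor, which stashes configured tags before the CRUD handlers run.)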
-func getTagsIn(ctx context.Context) []*firehose.Tag { +func getTagsIn(ctx context.Context) []awstypes.Tag { if inContext, ok := tftags.FromContext(ctx); ok { if tags := Tags(inContext.TagsIn.UnwrapOrDefault()); len(tags) > 0 { return tags @@ -91,7 +91,7 @@ func getTagsIn(ctx context.Context) []*firehose.Tag { } // setTagsOut sets firehose service tags in Context. -func setTagsOut(ctx context.Context, tags []*firehose.Tag) { +func setTagsOut(ctx context.Context, tags []awstypes.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } @@ -100,7 +100,7 @@ func setTagsOut(ctx context.Context, tags []*firehose.Tag) { // updateTags updates firehose service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. -func updateTags(ctx context.Context, conn firehoseiface.FirehoseAPI, identifier string, oldTagsMap, newTagsMap any) error { +func updateTags(ctx context.Context, conn *firehose.Client, identifier string, oldTagsMap, newTagsMap any, optFns ...func(*firehose.Options)) error { oldTags := tftags.New(ctx, oldTagsMap) newTags := tftags.New(ctx, newTagsMap) @@ -111,10 +111,10 @@ func updateTags(ctx context.Context, conn firehoseiface.FirehoseAPI, identifier if len(removedTags) > 0 { input := &firehose.UntagDeliveryStreamInput{ DeliveryStreamName: aws.String(identifier), - TagKeys: aws.StringSlice(removedTags.Keys()), + TagKeys: removedTags.Keys(), } - _, err := conn.UntagDeliveryStreamWithContext(ctx, input) + _, err := conn.UntagDeliveryStream(ctx, input, optFns...) if err != nil { return fmt.Errorf("untagging resource (%s): %w", identifier, err) @@ -129,7 +129,7 @@ func updateTags(ctx context.Context, conn firehoseiface.FirehoseAPI, identifier Tags: Tags(updatedTags), } - _, err := conn.TagDeliveryStreamWithContext(ctx, input) + _, err := conn.TagDeliveryStream(ctx, input, optFns...) if err != nil { return fmt.Errorf("tagging resource (%s): %w", identifier, err) @@ -142,5 +142,5 @@ func updateTags(ctx context.Context, conn firehoseiface.FirehoseAPI, identifier // UpdateTags updates firehose service tags. // It is called from outside this package. func (p *servicePackage) UpdateTags(ctx context.Context, meta any, identifier string, oldTags, newTags any) error { - return updateTags(ctx, meta.(*conns.AWSClient).FirehoseConn(ctx), identifier, oldTags, newTags) + return updateTags(ctx, meta.(*conns.AWSClient).FirehoseClient(ctx), identifier, oldTags, newTags) } diff --git a/internal/service/firehose/wait.go b/internal/service/firehose/wait.go deleted file mode 100644 index 6b99127077b..00000000000 --- a/internal/service/firehose/wait.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package firehose - -import ( - "context" - "fmt" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/firehose" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" -) - -func waitDeliveryStreamCreated(ctx context.Context, conn *firehose.Firehose, name string, timeout time.Duration) (*firehose.DeliveryStreamDescription, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{firehose.DeliveryStreamStatusCreating}, - Target: []string{firehose.DeliveryStreamStatusActive}, - Refresh: statusDeliveryStream(ctx, conn, name), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*firehose.DeliveryStreamDescription); ok { - if status, failureDescription := aws.StringValue(output.DeliveryStreamStatus), output.FailureDescription; status == firehose.DeliveryStreamStatusCreatingFailed && failureDescription != nil { - tfresource.SetLastError(err, fmt.Errorf("%s: %s", aws.StringValue(failureDescription.Type), aws.StringValue(failureDescription.Details))) - } - - return output, err - } - - return nil, err -} - -func waitDeliveryStreamDeleted(ctx context.Context, conn *firehose.Firehose, name string, timeout time.Duration) (*firehose.DeliveryStreamDescription, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{firehose.DeliveryStreamStatusDeleting}, - Target: []string{}, - Refresh: statusDeliveryStream(ctx, conn, name), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*firehose.DeliveryStreamDescription); ok { - if status, failureDescription := aws.StringValue(output.DeliveryStreamStatus), output.FailureDescription; status == firehose.DeliveryStreamStatusDeletingFailed && failureDescription != nil { - tfresource.SetLastError(err, fmt.Errorf("%s: %s", aws.StringValue(failureDescription.Type), aws.StringValue(failureDescription.Details))) - } - - return output, err - } - - return nil, err -} - -func waitDeliveryStreamEncryptionEnabled(ctx context.Context, conn *firehose.Firehose, name string, timeout time.Duration) (*firehose.DeliveryStreamEncryptionConfiguration, error) { //nolint:unparam - stateConf := &retry.StateChangeConf{ - Pending: []string{firehose.DeliveryStreamEncryptionStatusEnabling}, - Target: []string{firehose.DeliveryStreamEncryptionStatusEnabled}, - Refresh: statusDeliveryStreamEncryptionConfiguration(ctx, conn, name), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*firehose.DeliveryStreamEncryptionConfiguration); ok { - if status, failureDescription := aws.StringValue(output.Status), output.FailureDescription; status == firehose.DeliveryStreamEncryptionStatusEnablingFailed && failureDescription != nil { - tfresource.SetLastError(err, fmt.Errorf("%s: %s", aws.StringValue(failureDescription.Type), aws.StringValue(failureDescription.Details))) - } - - return output, err - } - - return nil, err -} - -func waitDeliveryStreamEncryptionDisabled(ctx context.Context, conn *firehose.Firehose, name string, timeout time.Duration) (*firehose.DeliveryStreamEncryptionConfiguration, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{firehose.DeliveryStreamEncryptionStatusDisabling}, - Target: []string{firehose.DeliveryStreamEncryptionStatusDisabled}, - Refresh: statusDeliveryStreamEncryptionConfiguration(ctx, conn, 
name), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*firehose.DeliveryStreamEncryptionConfiguration); ok { - if status, failureDescription := aws.StringValue(output.Status), output.FailureDescription; status == firehose.DeliveryStreamEncryptionStatusDisablingFailed && failureDescription != nil { - tfresource.SetLastError(err, fmt.Errorf("%s: %s", aws.StringValue(failureDescription.Type), aws.StringValue(failureDescription.Details))) - } - - return output, err - } - - return nil, err -} diff --git a/names/data/names_data.csv b/names/data/names_data.csv index 7c83c019049..d50bcc91a0f 100644 --- a/names/data/names_data.csv +++ b/names/data/names_data.csv @@ -206,7 +206,7 @@ keyspaces,keyspaces,keyspaces,keyspaces,,keyspaces,,,Keyspaces,Keyspaces,,,2,,aw kinesis,kinesis,kinesis,kinesis,,kinesis,,,Kinesis,Kinesis,,1,,aws_kinesis_stream,aws_kinesis_,,kinesis_stream,Kinesis,Amazon,,,,,,, kinesisanalytics,kinesisanalytics,kinesisanalytics,kinesisanalytics,,kinesisanalytics,,,KinesisAnalytics,KinesisAnalytics,,1,,aws_kinesis_analytics_,aws_kinesisanalytics_,,kinesis_analytics_,Kinesis Analytics,Amazon,,,,,,, kinesisanalyticsv2,kinesisanalyticsv2,kinesisanalyticsv2,kinesisanalyticsv2,,kinesisanalyticsv2,,,KinesisAnalyticsV2,KinesisAnalyticsV2,,1,,,aws_kinesisanalyticsv2_,,kinesisanalyticsv2_,Kinesis Analytics V2,Amazon,,,,,,, -firehose,firehose,firehose,firehose,,firehose,,,Firehose,Firehose,,1,,aws_kinesis_firehose_,aws_firehose_,,kinesis_firehose_,Kinesis Firehose,Amazon,,,,,,, +firehose,firehose,firehose,firehose,,firehose,,,Firehose,Firehose,,,2,aws_kinesis_firehose_,aws_firehose_,,kinesis_firehose_,Kinesis Firehose,Amazon,,,,,,, kinesisvideo,kinesisvideo,kinesisvideo,kinesisvideo,,kinesisvideo,,,KinesisVideo,KinesisVideo,,1,,,aws_kinesisvideo_,,kinesis_video_,Kinesis Video,Amazon,,,,,,, kinesis-video-archived-media,kinesisvideoarchivedmedia,kinesisvideoarchivedmedia,kinesisvideoarchivedmedia,,kinesisvideoarchivedmedia,,,KinesisVideoArchivedMedia,KinesisVideoArchivedMedia,,1,,,aws_kinesisvideoarchivedmedia_,,kinesisvideoarchivedmedia_,Kinesis Video Archived Media,Amazon,,x,,,,, kinesis-video-media,kinesisvideomedia,kinesisvideomedia,kinesisvideomedia,,kinesisvideomedia,,,KinesisVideoMedia,KinesisVideoMedia,,1,,,aws_kinesisvideomedia_,,kinesisvideomedia_,Kinesis Video Media,Amazon,,x,,,,, diff --git a/names/names.go b/names/names.go index ca113a0aa02..b40714b4082 100644 --- a/names/names.go +++ b/names/names.go @@ -53,6 +53,7 @@ const ( EMREndpointID = "elasticmapreduce" EMRServerlessEndpointID = "emrserverless" EvidentlyEndpointID = "evidently" + FirehoseEndpointID = "firehose" GlacierEndpointID = "glacier" GroundStationEndpointID = "groundstation" IdentityStoreEndpointID = "identitystore" diff --git a/website/docs/r/kinesis_firehose_delivery_stream.html.markdown b/website/docs/r/kinesis_firehose_delivery_stream.html.markdown index 12314991864..1188309ff29 100644 --- a/website/docs/r/kinesis_firehose_delivery_stream.html.markdown +++ b/website/docs/r/kinesis_firehose_delivery_stream.html.markdown @@ -659,7 +659,7 @@ The `redshift_configuration` object supports the following: The `elasticsearch_configuration` object supports the following: -* `buffering_interval` - (Optional) Buffer incoming data for the specified period of time, in seconds between 60 to 900, before delivering it to the destination. The default value is 300s. 
+* `buffering_interval` - (Optional) Buffer incoming data for the specified period of time, in seconds between 0 and 900, before delivering it to the destination. The default value is 300s.
 * `buffering_size` - (Optional) Buffer incoming data to the specified size, in MBs between 1 to 100, before delivering it to the destination. The default value is 5MB.
 * `domain_arn` - (Optional) The ARN of the Amazon ES domain. The pattern needs to be `arn:.*`. Conflicts with `cluster_endpoint`.
 * `cluster_endpoint` - (Optional) The endpoint to use when communicating with the cluster. Conflicts with `domain_arn`.
@@ -676,7 +676,7 @@ The `elasticsearch_configuration` object supports the following:

 The `opensearch_configuration` object supports the following:

-* `buffering_interval` - (Optional) Buffer incoming data for the specified period of time, in seconds between 60 to 900, before delivering it to the destination. The default value is 300s.
+* `buffering_interval` - (Optional) Buffer incoming data for the specified period of time, in seconds between 0 and 900, before delivering it to the destination. The default value is 300s.
 * `buffering_size` - (Optional) Buffer incoming data to the specified size, in MBs between 1 to 100, before delivering it to the destination. The default value is 5MB.
 * `domain_arn` - (Optional) The ARN of the Amazon ES domain. The pattern needs to be `arn:.*`. Conflicts with `cluster_endpoint`.
 * `cluster_endpoint` - (Optional) The endpoint to use when communicating with the cluster. Conflicts with `domain_arn`.
@@ -687,13 +687,14 @@ The `opensearch_configuration` object supports the following:
 * `s3_configuration` - (Required) The S3 Configuration. See [s3_configuration](#s3-configuration) for more details.
 * `s3_backup_mode` - (Optional) Defines how documents should be delivered to Amazon S3. Valid values are `FailedDocumentsOnly` and `AllDocuments`. Default value is `FailedDocumentsOnly`.
 * `type_name` - (Optional) The Elasticsearch type name with maximum length of 100 characters. Types are deprecated in OpenSearch_1.1. TypeName must be empty.
-* `cloudwatch_logging_options` - (Optional) The CloudWatch Logging Options for the delivery stream. More details are given below
-* `vpc_config` - (Optional) The VPC configuration for the delivery stream to connect to OpenSearch associated with the VPC. More details are given below
-* `processing_configuration` - (Optional) The data processing configuration. More details are given below.
+* `cloudwatch_logging_options` - (Optional) The CloudWatch Logging Options for the delivery stream. More details are given below.
+* `vpc_config` - (Optional) The VPC configuration for the delivery stream to connect to OpenSearch associated with the VPC. More details are given below.
+* `processing_configuration` - (Optional) The data processing configuration. More details are given below.
+* `document_id_options` - (Optional) The method for setting up document ID. More details are given below.

 The `opensearchserverless_configuration` object supports the following:

-* `buffering_interval` - (Optional) Buffer incoming data for the specified period of time, in seconds between 60 to 900, before delivering it to the destination. The default value is 300s.
+* `buffering_interval` - (Optional) Buffer incoming data for the specified period of time, in seconds between 0 and 900, before delivering it to the destination. The default value is 300s.
 * `buffering_size` - (Optional) Buffer incoming data to the specified size, in MBs between 1 to 100, before delivering it to the destination. The default value is 5MB.
 * `collection_endpoint` - (Required) The endpoint to use when communicating with the collection in the Serverless offering for Amazon OpenSearch Service.
 * `index_name` - (Required) The Serverless offering for Amazon OpenSearch Service index name.
@@ -707,6 +708,8 @@ The `opensearchserverless_configuration` object supports the following:

 The `splunk_configuration` objects supports the following:

+* `buffering_interval` - (Optional) Buffer incoming data for the specified period of time, in seconds between 0 and 60, before delivering it to the destination. The default value is 60s.
+* `buffering_size` - (Optional) Buffer incoming data to the specified size, in MBs between 1 and 5, before delivering it to the destination. The default value is 5MB.
 * `hec_acknowledgment_timeout` - (Optional) The amount of time, in seconds between 180 and 600, that Kinesis Firehose waits to receive an acknowledgment from Splunk after it sends it data.
 * `hec_endpoint` - (Required) The HTTP Event Collector (HEC) endpoint to which Kinesis Firehose sends your data.
 * `hec_endpoint_type` - (Optional) The HEC endpoint type. Valid values are `Raw` or `Event`. The default value is `Raw`.
@@ -753,7 +756,7 @@ The `parameters` array objects support the following:
 * `parameter_name` - (Required) Parameter name. Valid Values: `LambdaArn`, `NumberOfRetries`, `MetadataExtractionQuery`, `JsonParsingEngine`, `RoleArn`, `BufferSizeInMBs`, `BufferIntervalInSeconds`, `SubRecordType`, `Delimiter`. Validation is done against [AWS SDK constants](https://docs.aws.amazon.com/sdk-for-go/api/service/firehose/#pkg-constants); so that values not explicitly listed may also work.
 * `parameter_value` - (Required) Parameter value. Must be between 1 and 512 length (inclusive). When providing a Lambda ARN, you should specify the resource version as well.

-~> **NOTE:** Parameters with default values, including `NumberOfRetries`(default: 3), `RoleArn`(default: firehose role ARN), `BufferSizeInMBs`(default: 3), and `BufferIntervalInSeconds`(default: 60), are not stored in terraform state. To prevent perpetual differences, it is therefore recommended to only include parameters with non-default values.
+~> **NOTE:** Parameters with default values, including `NumberOfRetries`(default: 3), `RoleArn`(default: firehose role ARN), `BufferSizeInMBs`(default: 1), and `BufferIntervalInSeconds`(default: 60), are not stored in terraform state. To prevent perpetual differences, it is therefore recommended to only include parameters with non-default values.

 The `request_configuration` object supports the following:

@@ -899,6 +902,10 @@ Required when using [dynamic partitioning](https://docs.aws.amazon.com/firehose/

 ~> **NOTE:** You can enable dynamic partitioning only when you create a new delivery stream. Once you enable dynamic partitioning on a delivery stream, it cannot be disabled on this delivery stream. Therefore, Terraform will recreate the resource whenever dynamic partitioning is enabled or disabled.

+### document_id_options
+
+* `default_document_id_format` - (Required) The method for setting up document ID. Valid values: `FIREHOSE_DEFAULT`, `NO_DOCUMENT_ID`.
+
 ## Attribute Reference

 This resource exports the following attributes in addition to the arguments above: