Skip to content

Commit

Permalink
Merge pull request #31141 from hashicorp/b-consistent-firehose-buffer
Browse files Browse the repository at this point in the history
firehose/delivery_stream: Make buffering args consistent
  • Loading branch information
YakDriver authored May 3, 2023
2 parents b6513e0 + d5d7c88 commit 27df69a
Show file tree
Hide file tree
Showing 4 changed files with 73 additions and 62 deletions.
11 changes: 11 additions & 0 deletions .changelog/31141.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
```release-note:breaking-change
resource/aws_kinesis_firehose_delivery_stream: Rename `s3_configuration.0.buffer_size` and `s3_configuration.0.buffer_interval` to `s3_configuration.0.buffering_size` and `s3_configuration.0.buffering_interval`, respectively
```

```release-note:breaking-change
resource/aws_kinesis_firehose_delivery_stream: Rename `redshift_configuration.0.s3_backup_configuration.0.buffer_size` and `redshift_configuration.0.s3_backup_configuration.0.buffer_interval` to `redshift_configuration.0.s3_backup_configuration.0.buffering_size` and `redshift_configuration.0.s3_backup_configuration.0.buffering_interval`, respectively
```

```release-note:breaking-change
resource/aws_kinesis_firehose_delivery_stream: Rename `extended_s3_configuration.0.s3_backup_configuration.0.buffer_size` and `extended_s3_configuration.0.s3_backup_configuration.0.buffer_interval` to `extended_s3_configuration.0.s3_backup_configuration.0.buffering_size` and `extended_s3_configuration.0.s3_backup_configuration.0.buffering_interval`, respectively
```
40 changes: 20 additions & 20 deletions internal/service/firehose/delivery_stream.go
Original file line number Diff line number Diff line change
Expand Up @@ -137,14 +137,14 @@ func s3ConfigurationSchema() *schema.Schema {
ValidateFunc: verify.ValidARN,
},

"buffer_size": {
"buffering_size": {
Type: schema.TypeInt,
Optional: true,
Default: 5,
ValidateFunc: validation.IntAtLeast(1),
},

"buffer_interval": {
"buffering_interval": {
Type: schema.TypeInt,
Optional: true,
Default: 300,
Expand Down Expand Up @@ -357,8 +357,8 @@ func flattenExtendedS3Configuration(description *firehose.ExtendedS3DestinationD
}

if description.BufferingHints != nil {
m["buffer_interval"] = int(aws.Int64Value(description.BufferingHints.IntervalInSeconds))
m["buffer_size"] = int(aws.Int64Value(description.BufferingHints.SizeInMBs))
m["buffering_interval"] = int(aws.Int64Value(description.BufferingHints.IntervalInSeconds))
m["buffering_size"] = int(aws.Int64Value(description.BufferingHints.SizeInMBs))
}

if description.EncryptionConfiguration != nil && description.EncryptionConfiguration.KMSEncryptionConfig != nil {
Expand Down Expand Up @@ -433,8 +433,8 @@ func flattenS3Configuration(description *firehose.S3DestinationDescription) []ma
}

if description.BufferingHints != nil {
m["buffer_interval"] = int(aws.Int64Value(description.BufferingHints.IntervalInSeconds))
m["buffer_size"] = int(aws.Int64Value(description.BufferingHints.SizeInMBs))
m["buffering_interval"] = int(aws.Int64Value(description.BufferingHints.IntervalInSeconds))
m["buffering_size"] = int(aws.Int64Value(description.BufferingHints.SizeInMBs))
}

if description.EncryptionConfiguration != nil && description.EncryptionConfiguration.KMSEncryptionConfig != nil {
Expand Down Expand Up @@ -1026,13 +1026,13 @@ func ResourceDeliveryStream() *schema.Resource {
ValidateFunc: verify.ValidARN,
},

"buffer_size": {
"buffering_size": {
Type: schema.TypeInt,
Optional: true,
Default: 5,
},

"buffer_interval": {
"buffering_interval": {
Type: schema.TypeInt,
Optional: true,
Default: 300,
Expand Down Expand Up @@ -1786,8 +1786,8 @@ func createS3Config(d *schema.ResourceData) *firehose.S3DestinationConfiguration
BucketARN: aws.String(s3["bucket_arn"].(string)),
RoleARN: aws.String(s3["role_arn"].(string)),
BufferingHints: &firehose.BufferingHints{
IntervalInSeconds: aws.Int64(int64(s3["buffer_interval"].(int))),
SizeInMBs: aws.Int64(int64(s3["buffer_size"].(int))),
IntervalInSeconds: aws.Int64(int64(s3["buffering_interval"].(int))),
SizeInMBs: aws.Int64(int64(s3["buffering_size"].(int))),
},
Prefix: extractPrefixConfiguration(s3),
CompressionFormat: aws.String(s3["compression_format"].(string)),
Expand Down Expand Up @@ -1817,8 +1817,8 @@ func expandS3BackupConfig(d map[string]interface{}) *firehose.S3DestinationConfi
BucketARN: aws.String(s3["bucket_arn"].(string)),
RoleARN: aws.String(s3["role_arn"].(string)),
BufferingHints: &firehose.BufferingHints{
IntervalInSeconds: aws.Int64(int64(s3["buffer_interval"].(int))),
SizeInMBs: aws.Int64(int64(s3["buffer_size"].(int))),
IntervalInSeconds: aws.Int64(int64(s3["buffering_interval"].(int))),
SizeInMBs: aws.Int64(int64(s3["buffering_size"].(int))),
},
Prefix: extractPrefixConfiguration(s3),
CompressionFormat: aws.String(s3["compression_format"].(string)),
Expand All @@ -1843,8 +1843,8 @@ func createExtendedS3Config(d *schema.ResourceData) *firehose.ExtendedS3Destinat
BucketARN: aws.String(s3["bucket_arn"].(string)),
RoleARN: aws.String(s3["role_arn"].(string)),
BufferingHints: &firehose.BufferingHints{
IntervalInSeconds: aws.Int64(int64(s3["buffer_interval"].(int))),
SizeInMBs: aws.Int64(int64(s3["buffer_size"].(int))),
IntervalInSeconds: aws.Int64(int64(s3["buffering_interval"].(int))),
SizeInMBs: aws.Int64(int64(s3["buffering_size"].(int))),
},
Prefix: extractPrefixConfiguration(s3),
CompressionFormat: aws.String(s3["compression_format"].(string)),
Expand Down Expand Up @@ -1883,8 +1883,8 @@ func updateS3Config(d *schema.ResourceData) *firehose.S3DestinationUpdate {
BucketARN: aws.String(s3["bucket_arn"].(string)),
RoleARN: aws.String(s3["role_arn"].(string)),
BufferingHints: &firehose.BufferingHints{
IntervalInSeconds: aws.Int64((int64)(s3["buffer_interval"].(int))),
SizeInMBs: aws.Int64((int64)(s3["buffer_size"].(int))),
IntervalInSeconds: aws.Int64((int64)(s3["buffering_interval"].(int))),
SizeInMBs: aws.Int64((int64)(s3["buffering_size"].(int))),
},
ErrorOutputPrefix: aws.String(s3["error_output_prefix"].(string)),
Prefix: extractPrefixConfiguration(s3),
Expand Down Expand Up @@ -1912,8 +1912,8 @@ func updateS3BackupConfig(d map[string]interface{}) *firehose.S3DestinationUpdat
BucketARN: aws.String(s3["bucket_arn"].(string)),
RoleARN: aws.String(s3["role_arn"].(string)),
BufferingHints: &firehose.BufferingHints{
IntervalInSeconds: aws.Int64((int64)(s3["buffer_interval"].(int))),
SizeInMBs: aws.Int64((int64)(s3["buffer_size"].(int))),
IntervalInSeconds: aws.Int64((int64)(s3["buffering_interval"].(int))),
SizeInMBs: aws.Int64((int64)(s3["buffering_size"].(int))),
},
ErrorOutputPrefix: aws.String(s3["error_output_prefix"].(string)),
Prefix: extractPrefixConfiguration(s3),
Expand All @@ -1936,8 +1936,8 @@ func updateExtendedS3Config(d *schema.ResourceData) *firehose.ExtendedS3Destinat
BucketARN: aws.String(s3["bucket_arn"].(string)),
RoleARN: aws.String(s3["role_arn"].(string)),
BufferingHints: &firehose.BufferingHints{
IntervalInSeconds: aws.Int64((int64)(s3["buffer_interval"].(int))),
SizeInMBs: aws.Int64((int64)(s3["buffer_size"].(int))),
IntervalInSeconds: aws.Int64((int64)(s3["buffering_interval"].(int))),
SizeInMBs: aws.Int64((int64)(s3["buffering_size"].(int))),
},
ErrorOutputPrefix: aws.String(s3["error_output_prefix"].(string)),
Prefix: extractPrefixConfiguration(s3),
Expand Down
52 changes: 26 additions & 26 deletions internal/service/firehose/delivery_stream_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -2657,8 +2657,8 @@ resource "aws_kinesis_firehose_delivery_stream" "test" {
s3_configuration {
role_arn = aws_iam_role.firehose.arn
bucket_arn = aws_s3_bucket.bucket.arn
buffer_size = 10
buffer_interval = 400
buffering_size = 10
buffering_interval = 400
compression_format = "GZIP"
}
}
Expand Down Expand Up @@ -2765,8 +2765,8 @@ resource "aws_kinesis_firehose_delivery_stream" "test" {
extended_s3_configuration {
bucket_arn = aws_s3_bucket.bucket.arn
# InvalidArgumentException: BufferingHints.SizeInMBs must be at least 64 when data format conversion is enabled.
buffer_size = 128
role_arn = aws_iam_role.firehose.arn
buffering_size = 128
role_arn = aws_iam_role.firehose.arn
data_format_conversion_configuration {
enabled = %[2]t
Expand Down Expand Up @@ -2831,8 +2831,8 @@ resource "aws_kinesis_firehose_delivery_stream" "test" {
extended_s3_configuration {
bucket_arn = aws_s3_bucket.bucket.arn
# InvalidArgumentException: BufferingHints.SizeInMBs must be at least 64 when data format conversion is enabled.
buffer_size = 128
role_arn = aws_iam_role.firehose.arn
buffering_size = 128
role_arn = aws_iam_role.firehose.arn
data_format_conversion_configuration {
input_format_configuration {
Expand Down Expand Up @@ -2909,8 +2909,8 @@ resource "aws_kinesis_firehose_delivery_stream" "test" {
extended_s3_configuration {
bucket_arn = aws_s3_bucket.bucket.arn
# InvalidArgumentException: BufferingHints.SizeInMBs must be at least 64 when data format conversion is enabled.
buffer_size = 128
role_arn = aws_iam_role.firehose.arn
buffering_size = 128
role_arn = aws_iam_role.firehose.arn
data_format_conversion_configuration {
input_format_configuration {
Expand Down Expand Up @@ -2973,8 +2973,8 @@ resource "aws_kinesis_firehose_delivery_stream" "test" {
extended_s3_configuration {
bucket_arn = aws_s3_bucket.bucket.arn
# InvalidArgumentException: BufferingHints.SizeInMBs must be at least 64 when data format conversion is enabled.
buffer_size = 128
role_arn = aws_iam_role.firehose.arn
buffering_size = 128
role_arn = aws_iam_role.firehose.arn
data_format_conversion_configuration {
input_format_configuration {
Expand Down Expand Up @@ -3037,8 +3037,8 @@ resource "aws_kinesis_firehose_delivery_stream" "test" {
extended_s3_configuration {
bucket_arn = aws_s3_bucket.bucket.arn
# InvalidArgumentException: BufferingHints.SizeInMBs must be at least 64 when data format conversion is enabled.
buffer_size = 128
role_arn = aws_iam_role.firehose.arn
buffering_size = 128
role_arn = aws_iam_role.firehose.arn
data_format_conversion_configuration {
input_format_configuration {
Expand Down Expand Up @@ -3182,7 +3182,7 @@ resource "aws_kinesis_firehose_delivery_stream" "test" {
bucket_arn = aws_s3_bucket.bucket.arn
prefix = "custom-prefix/customerId=!{partitionKeyFromLambda:customerId}/year=!{timestamp:yyyy}/month=!{timestamp:MM}/day=!{timestamp:dd}/hour=!{timestamp:HH}/"
error_output_prefix = "prefix1"
buffer_size = 64
buffering_size = 64
dynamic_partitioning_configuration {
enabled = true
Expand Down Expand Up @@ -3237,7 +3237,7 @@ resource "aws_kinesis_firehose_delivery_stream" "test" {
role_arn = aws_iam_role.firehose.arn
bucket_arn = aws_s3_bucket.bucket.arn
error_output_prefix = "prefix1"
buffer_size = 64
buffering_size = 64
}
}
`, rName))
Expand Down Expand Up @@ -3278,8 +3278,8 @@ resource "aws_kinesis_firehose_delivery_stream" "test" {
}
}
buffer_size = 10
buffer_interval = 400
buffering_size = 10
buffering_interval = 400
compression_format = "GZIP"
s3_backup_mode = "Enabled"
Expand All @@ -3305,8 +3305,8 @@ resource "aws_kinesis_firehose_delivery_stream" "test" {
extended_s3_configuration {
role_arn = aws_iam_role.firehose.arn
bucket_arn = aws_s3_bucket.bucket.arn
buffer_size = 10
buffer_interval = 400
buffering_size = 10
buffering_interval = 400
compression_format = "GZIP"
s3_backup_mode = "Enabled"
Expand Down Expand Up @@ -3379,8 +3379,8 @@ resource "aws_kinesis_firehose_delivery_stream" "test" {
s3_configuration {
role_arn = aws_iam_role.firehose.arn
bucket_arn = aws_s3_bucket.bucket.arn
buffer_size = 10
buffer_interval = 400
buffering_size = 10
buffering_interval = 400
compression_format = "GZIP"
}
Expand Down Expand Up @@ -3458,8 +3458,8 @@ resource "aws_kinesis_firehose_delivery_stream" "test" {
s3_configuration {
role_arn = aws_iam_role.firehose.arn
bucket_arn = aws_s3_bucket.bucket.arn
buffer_size = 10
buffer_interval = 400
buffering_size = 10
buffering_interval = 400
compression_format = "GZIP"
}
Expand Down Expand Up @@ -3601,8 +3601,8 @@ resource "aws_kinesis_firehose_delivery_stream" "test" {
s3_configuration {
role_arn = aws_iam_role.firehose.arn
bucket_arn = aws_s3_bucket.bucket.arn
buffer_size = 10
buffer_interval = 400
buffering_size = 10
buffering_interval = 400
compression_format = "GZIP"
}
Expand Down Expand Up @@ -4426,8 +4426,8 @@ resource "aws_kinesis_firehose_delivery_stream" "test" {
extended_s3_configuration {
role_arn = aws_iam_role.firehose.arn
prefix = "tracking/autocomplete_stream/"
buffer_interval = 300
buffer_size = 5
buffering_interval = 300
buffering_size = 5
compression_format = "GZIP"
bucket_arn = aws_s3_bucket.bucket.arn
}
Expand Down
32 changes: 16 additions & 16 deletions website/docs/r/kinesis_firehose_delivery_stream.html.markdown
Original file line number Diff line number Diff line change
Expand Up @@ -107,7 +107,7 @@ resource "aws_kinesis_firehose_delivery_stream" "extended_s3_stream" {
role_arn = aws_iam_role.firehose_role.arn
bucket_arn = aws_s3_bucket.bucket.arn
buffer_size = 64
buffering_size = 64
# https://docs.aws.amazon.com/firehose/latest/dev/dynamic-partitioning.html
dynamic_partitioning_configuration {
Expand Down Expand Up @@ -212,8 +212,8 @@ resource "aws_kinesis_firehose_delivery_stream" "test_stream" {
s3_configuration {
role_arn = aws_iam_role.firehose_role.arn
bucket_arn = aws_s3_bucket.bucket.arn
buffer_size = 10
buffer_interval = 400
buffering_size = 10
buffering_interval = 400
compression_format = "GZIP"
}
Expand All @@ -230,8 +230,8 @@ resource "aws_kinesis_firehose_delivery_stream" "test_stream" {
s3_backup_configuration {
role_arn = aws_iam_role.firehose_role.arn
bucket_arn = aws_s3_bucket.bucket.arn
buffer_size = 15
buffer_interval = 300
buffering_size = 15
buffering_interval = 300
compression_format = "GZIP"
}
}
Expand All @@ -252,8 +252,8 @@ resource "aws_kinesis_firehose_delivery_stream" "test_stream" {
s3_configuration {
role_arn = aws_iam_role.firehose_role.arn
bucket_arn = aws_s3_bucket.bucket.arn
buffer_size = 10
buffer_interval = 400
buffering_size = 10
buffering_interval = 400
compression_format = "GZIP"
}
Expand Down Expand Up @@ -375,8 +375,8 @@ resource "aws_kinesis_firehose_delivery_stream" "test_stream" {
s3_configuration {
role_arn = aws_iam_role.firehose_role.arn
bucket_arn = aws_s3_bucket.bucket.arn
buffer_size = 10
buffer_interval = 400
buffering_size = 10
buffering_interval = 400
compression_format = "GZIP"
}
Expand Down Expand Up @@ -495,8 +495,8 @@ resource "aws_kinesis_firehose_delivery_stream" "test_stream" {
s3_configuration {
role_arn = aws_iam_role.firehose.arn
bucket_arn = aws_s3_bucket.bucket.arn
buffer_size = 10
buffer_interval = 400
buffering_size = 10
buffering_interval = 400
compression_format = "GZIP"
}
Expand All @@ -520,8 +520,8 @@ resource "aws_kinesis_firehose_delivery_stream" "test_stream" {
s3_configuration {
role_arn = aws_iam_role.firehose.arn
bucket_arn = aws_s3_bucket.bucket.arn
buffer_size = 10
buffer_interval = 400
buffering_size = 10
buffering_interval = 400
compression_format = "GZIP"
}
Expand Down Expand Up @@ -590,9 +590,9 @@ The `s3_configuration` object supports the following:
* `role_arn` - (Required) The ARN of the AWS credentials.
* `bucket_arn` - (Required) The ARN of the S3 bucket
* `prefix` - (Optional) The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket
* `buffer_size` - (Optional) Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5.
* `buffering_size` - (Optional) Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5.
We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec set SizeInMBs to be 10 MB or higher.
* `buffer_interval` - (Optional) Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.
* `buffering_interval` - (Optional) Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.
* `compression_format` - (Optional) The compression format. If no value is specified, the default is `UNCOMPRESSED`. Other supported values are `GZIP`, `ZIP`, `Snappy`, & `HADOOP_SNAPPY`.
* `error_output_prefix` - (Optional) Prefix added to failed records before writing them to S3. Not currently supported for `redshift` destination. This prefix appears immediately following the bucket name. For information about how to specify this prefix, see [Custom Prefixes for Amazon S3 Objects](https://docs.aws.amazon.com/firehose/latest/dev/s3-prefixes.html).
* `kms_key_arn` - (Optional) Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will
Expand Down Expand Up @@ -729,7 +729,7 @@ resource "aws_kinesis_firehose_delivery_stream" "example" {
# ... other configuration ...
extended_s3_configuration {
# Must be at least 64
buffer_size = 128
buffering_size = 128
# ... other configuration ...
data_format_conversion_configuration {
Expand Down

0 comments on commit 27df69a

Please sign in to comment.