diff --git a/clients/client-firehose/src/commands/CreateDeliveryStreamCommand.ts b/clients/client-firehose/src/commands/CreateDeliveryStreamCommand.ts index 92239cd40f0e..26441eabbeb8 100644 --- a/clients/client-firehose/src/commands/CreateDeliveryStreamCommand.ts +++ b/clients/client-firehose/src/commands/CreateDeliveryStreamCommand.ts @@ -32,30 +32,30 @@ export interface CreateDeliveryStreamCommandInput extends CreateDeliveryStreamIn export interface CreateDeliveryStreamCommandOutput extends CreateDeliveryStreamOutput, __MetadataBearer {} /** - *

Creates a Firehose delivery stream.

- *

By default, you can create up to 50 delivery streams per Amazon Web Services + *

Creates a Firehose stream.

+ *

By default, you can create up to 50 Firehose streams per Amazon Web Services * Region.

*

This is an asynchronous operation that immediately returns. The initial status of the - * delivery stream is CREATING. After the delivery stream is created, its status - * is ACTIVE and it now accepts data. If the delivery stream creation fails, the + * Firehose stream is CREATING. After the Firehose stream is created, its status + * is ACTIVE and it now accepts data. If the Firehose stream creation fails, the * status transitions to CREATING_FAILED. Attempts to send data to a delivery * stream that is not in the ACTIVE state cause an exception. To check the state - * of a delivery stream, use DescribeDeliveryStream.

- *

If the status of a delivery stream is CREATING_FAILED, this status + * of a Firehose stream, use DescribeDeliveryStream.

+ *

If the status of a Firehose stream is CREATING_FAILED, this status * doesn't change, and you can't invoke CreateDeliveryStream again on it. * However, you can invoke the DeleteDeliveryStream operation to delete * it.

- *

A Firehose delivery stream can be configured to receive records directly + *

A Firehose stream can be configured to receive records directly * from providers using PutRecord or PutRecordBatch, or it * can be configured to use an existing Kinesis stream as its source. To specify a Kinesis * data stream as input, set the DeliveryStreamType parameter to * KinesisStreamAsSource, and provide the Kinesis stream Amazon Resource Name * (ARN) and role ARN in the KinesisStreamSourceConfiguration * parameter.
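For orientation, here is a minimal sketch of the Kinesis-as-source case with the v3 client. All names and ARNs are placeholders, and a destination (Amazon S3 here) is still required:

```typescript
import { FirehoseClient, CreateDeliveryStreamCommand } from "@aws-sdk/client-firehose";

const client = new FirehoseClient({ region: "us-east-1" });

// Placeholder ARNs; substitute your own Kinesis stream, IAM role, and bucket.
const response = await client.send(
  new CreateDeliveryStreamCommand({
    DeliveryStreamName: "example-stream",
    DeliveryStreamType: "KinesisStreamAsSource",
    KinesisStreamSourceConfiguration: {
      KinesisStreamARN: "arn:aws:kinesis:us-east-1:111122223333:stream/example-source",
      RoleARN: "arn:aws:iam::111122223333:role/example-firehose-role",
    },
    // Exactly one destination is configured per stream; S3 is shown here.
    ExtendedS3DestinationConfiguration: {
      RoleARN: "arn:aws:iam::111122223333:role/example-firehose-role",
      BucketARN: "arn:aws:s3:::example-bucket",
    },
  })
);
console.log(response.DeliveryStreamARN);
```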

- *

To create a delivery stream with server-side encryption (SSE) enabled, include DeliveryStreamEncryptionConfigurationInput in your request. This is + *

To create a Firehose stream with server-side encryption (SSE) enabled, include DeliveryStreamEncryptionConfigurationInput in your request. This is * optional. You can also invoke StartDeliveryStreamEncryption to turn on - * SSE for an existing delivery stream that doesn't have SSE enabled.

- *

A delivery stream is configured with a single destination, such as Amazon Simple + * SSE for an existing Firehose stream that doesn't have SSE enabled.

+ *

A Firehose stream is configured with a single destination, such as Amazon Simple * Storage Service (Amazon S3), Amazon Redshift, Amazon OpenSearch Service, Amazon OpenSearch * Serverless, Splunk, and any custom HTTP endpoint or HTTP endpoints owned by or supported by * third-party service providers, including Datadog, Dynatrace, LogicMonitor, MongoDB, New @@ -107,7 +107,7 @@ export interface CreateDeliveryStreamCommandOutput extends CreateDeliveryStreamO * const client = new FirehoseClient(config); * const input = { // CreateDeliveryStreamInput * DeliveryStreamName: "STRING_VALUE", // required - * DeliveryStreamType: "DirectPut" || "KinesisStreamAsSource" || "MSKAsSource", + * DeliveryStreamType: "DirectPut" || "KinesisStreamAsSource" || "MSKAsSource" || "DatabaseAsSource", * KinesisStreamSourceConfiguration: { // KinesisStreamSourceConfiguration * KinesisStreamARN: "STRING_VALUE", // required * RoleARN: "STRING_VALUE", // required @@ -586,9 +586,22 @@ export interface CreateDeliveryStreamCommandOutput extends CreateDeliveryStreamO * UniqueKeys: [ * "STRING_VALUE", * ], + * PartitionSpec: { // PartitionSpec + * Identity: [ // PartitionFields + * { // PartitionField + * SourceName: "STRING_VALUE", // required + * }, + * ], + * }, * S3ErrorOutputPrefix: "STRING_VALUE", * }, * ], + * SchemaEvolutionConfiguration: { // SchemaEvolutionConfiguration + * Enabled: true || false, // required + * }, + * TableCreationConfiguration: { // TableCreationConfiguration + * Enabled: true || false, // required + * }, * BufferingHints: "", * CloudWatchLoggingOptions: "", * ProcessingConfiguration: "", @@ -599,9 +612,54 @@ export interface CreateDeliveryStreamCommandOutput extends CreateDeliveryStreamO * RoleARN: "STRING_VALUE", // required * CatalogConfiguration: { // CatalogConfiguration * CatalogARN: "STRING_VALUE", + * WarehouseLocation: "STRING_VALUE", * }, * S3Configuration: "", // required * }, + * DatabaseSourceConfiguration: { // DatabaseSourceConfiguration + * Type: "MySQL" || "PostgreSQL", // required + * Endpoint: "STRING_VALUE", // required + * Port: Number("int"), // required + * SSLMode: "Disabled" || "Enabled", + * Databases: { // DatabaseList + * Include: [ // DatabaseIncludeOrExcludeList + * "STRING_VALUE", + * ], + * Exclude: [ + * "STRING_VALUE", + * ], + * }, + * Tables: { // DatabaseTableList + * Include: [ // DatabaseTableIncludeOrExcludeList + * "STRING_VALUE", + * ], + * Exclude: [ + * "STRING_VALUE", + * ], + * }, + * Columns: { // DatabaseColumnList + * Include: [ // DatabaseColumnIncludeOrExcludeList + * "STRING_VALUE", + * ], + * Exclude: [ + * "STRING_VALUE", + * ], + * }, + * SurrogateKeys: [ // DatabaseSurrogateKeyList + * "STRING_VALUE", + * ], + * SnapshotWatermarkTable: "STRING_VALUE", // required + * DatabaseSourceAuthenticationConfiguration: { // DatabaseSourceAuthenticationConfiguration + * SecretsManagerConfiguration: { + * SecretARN: "STRING_VALUE", + * RoleARN: "STRING_VALUE", + * Enabled: true || false, // required + * }, + * }, + * DatabaseSourceVPCConfiguration: { // DatabaseSourceVPCConfiguration + * VpcEndpointServiceName: "STRING_VALUE", // required + * }, + * }, * }; * const command = new CreateDeliveryStreamCommand(input); * const response = await client.send(command); @@ -622,7 +680,7 @@ export interface CreateDeliveryStreamCommandOutput extends CreateDeliveryStreamO * * @throws {@link InvalidKMSResourceException} (client fault) *

Firehose throws this exception when an attempt to put records or to start - * or stop delivery stream encryption fails. This happens when the KMS service throws one of + * or stop Firehose stream encryption fails. This happens when the KMS service throws one of * the following exception types: AccessDeniedException, * InvalidStateException, DisabledException, or * NotFoundException.
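The v3 client surfaces this fault as a typed error class, so callers can branch on it rather than string-matching; a sketch (the stream name is a placeholder and the rest of the input is elided):

```typescript
import {
  FirehoseClient,
  CreateDeliveryStreamCommand,
  InvalidKMSResourceException,
} from "@aws-sdk/client-firehose";

const client = new FirehoseClient({});

try {
  await client.send(new CreateDeliveryStreamCommand({ DeliveryStreamName: "example-stream" }));
} catch (err) {
  if (err instanceof InvalidKMSResourceException) {
    // Underlying KMS failure (AccessDenied, InvalidState, Disabled, NotFound).
    console.error("KMS problem:", err.message);
  } else {
    throw err;
  }
}
```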

diff --git a/clients/client-firehose/src/commands/DeleteDeliveryStreamCommand.ts b/clients/client-firehose/src/commands/DeleteDeliveryStreamCommand.ts index 5553662911bb..33c1c50f2c13 100644 --- a/clients/client-firehose/src/commands/DeleteDeliveryStreamCommand.ts +++ b/clients/client-firehose/src/commands/DeleteDeliveryStreamCommand.ts @@ -28,17 +28,17 @@ export interface DeleteDeliveryStreamCommandInput extends DeleteDeliveryStreamIn export interface DeleteDeliveryStreamCommandOutput extends DeleteDeliveryStreamOutput, __MetadataBearer {} /** - *

Deletes a delivery stream and its data.

- *

You can delete a delivery stream only if it is in one of the following states: + *

Deletes a Firehose stream and its data.

+ *

You can delete a Firehose stream only if it is in one of the following states: * ACTIVE, DELETING, CREATING_FAILED, or - * DELETING_FAILED. You can't delete a delivery stream that is in the - * CREATING state. To check the state of a delivery stream, use DescribeDeliveryStream.

- *

DeleteDeliveryStream is an asynchronous API. When an API request to DeleteDeliveryStream succeeds, the delivery stream is marked for deletion, and it goes into the - * DELETING state.While the delivery stream is in the DELETING state, the service might + * DELETING_FAILED. You can't delete a Firehose stream that is in the + * CREATING state. To check the state of a Firehose stream, use DescribeDeliveryStream.

+ *

DeleteDeliveryStream is an asynchronous API. When an API request to DeleteDeliveryStream succeeds, the Firehose stream is marked for deletion, and it goes into the - * DELETING state. While the Firehose stream is in the DELETING state, the service might * continue to accept records, but it doesn't make any guarantees with respect to delivering * the data. Therefore, as a best practice, first stop any applications that are sending - * records before you delete a Firehose stream.

- *

Removal of a delivery stream that is in the DELETING state is a low priority operation for the service. A stream may remain in the + * records before you delete a Firehose stream.

+ *

Removal of a Firehose stream that is in the DELETING state is a low priority operation for the service. A stream may remain in the * DELETING state for several minutes. Therefore, as a best practice, applications should not wait for streams in the DELETING state * to be removed.
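A sketch that follows this advice: mark the stream for deletion and move on instead of waiting for it to leave the DELETING state (the name is a placeholder):

```typescript
import { FirehoseClient, DeleteDeliveryStreamCommand } from "@aws-sdk/client-firehose";

const client = new FirehoseClient({});

await client.send(
  new DeleteDeliveryStreamCommand({
    DeliveryStreamName: "example-stream",
    // Optional: force deletion of a DELETING_FAILED stream.
    AllowForceDelete: true,
  })
);
// Don't poll for removal; DELETING streams are cleaned up at low priority.
```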

* @example diff --git a/clients/client-firehose/src/commands/DescribeDeliveryStreamCommand.ts b/clients/client-firehose/src/commands/DescribeDeliveryStreamCommand.ts index d6e692a2a9ec..db0ca5f12343 100644 --- a/clients/client-firehose/src/commands/DescribeDeliveryStreamCommand.ts +++ b/clients/client-firehose/src/commands/DescribeDeliveryStreamCommand.ts @@ -32,10 +32,10 @@ export interface DescribeDeliveryStreamCommandInput extends DescribeDeliveryStre export interface DescribeDeliveryStreamCommandOutput extends DescribeDeliveryStreamOutput, __MetadataBearer {} /** - *

Describes the specified delivery stream and its status. For example, after your - * delivery stream is created, call DescribeDeliveryStream to see whether the - * delivery stream is ACTIVE and therefore ready for data to be sent to it.

- *

If the status of a delivery stream is CREATING_FAILED, this status + *

Describes the specified Firehose stream and its status. For example, after your + * Firehose stream is created, call DescribeDeliveryStream to see whether the + * Firehose stream is ACTIVE and therefore ready for data to be sent to it.

+ *

If the status of a Firehose stream is CREATING_FAILED, this status * doesn't change, and you can't invoke CreateDeliveryStream again on it. * However, you can invoke the DeleteDeliveryStream operation to delete it. * If the status is DELETING_FAILED, you can force deletion by invoking DeleteDeliveryStream again but with DeleteDeliveryStreamInput$AllowForceDelete set to true.
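A polling sketch built on that state machine (the stream name and interval are arbitrary):

```typescript
import { FirehoseClient, DescribeDeliveryStreamCommand } from "@aws-sdk/client-firehose";

const client = new FirehoseClient({});

let status: string | undefined;
do {
  const { DeliveryStreamDescription } = await client.send(
    new DescribeDeliveryStreamCommand({ DeliveryStreamName: "example-stream" })
  );
  status = DeliveryStreamDescription?.DeliveryStreamStatus;
  if (status === "CREATING") {
    await new Promise((resolve) => setTimeout(resolve, 5_000));
  }
} while (status === "CREATING");

if (status === "CREATING_FAILED") {
  // This status never changes on its own: delete the stream, then re-create it.
}
```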

@@ -58,7 +58,7 @@ export interface DescribeDeliveryStreamCommandOutput extends DescribeDeliveryStr * // DeliveryStreamARN: "STRING_VALUE", // required * // DeliveryStreamStatus: "CREATING" || "CREATING_FAILED" || "DELETING" || "DELETING_FAILED" || "ACTIVE", // required * // FailureDescription: { // FailureDescription - * // Type: "RETIRE_KMS_GRANT_FAILED" || "CREATE_KMS_GRANT_FAILED" || "KMS_ACCESS_DENIED" || "DISABLED_KMS_KEY" || "INVALID_KMS_KEY" || "KMS_KEY_NOT_FOUND" || "KMS_OPT_IN_REQUIRED" || "CREATE_ENI_FAILED" || "DELETE_ENI_FAILED" || "SUBNET_NOT_FOUND" || "SECURITY_GROUP_NOT_FOUND" || "ENI_ACCESS_DENIED" || "SUBNET_ACCESS_DENIED" || "SECURITY_GROUP_ACCESS_DENIED" || "UNKNOWN_ERROR", // required + * // Type: "VPC_ENDPOINT_SERVICE_NAME_NOT_FOUND" || "VPC_INTERFACE_ENDPOINT_SERVICE_ACCESS_DENIED" || "RETIRE_KMS_GRANT_FAILED" || "CREATE_KMS_GRANT_FAILED" || "KMS_ACCESS_DENIED" || "DISABLED_KMS_KEY" || "INVALID_KMS_KEY" || "KMS_KEY_NOT_FOUND" || "KMS_OPT_IN_REQUIRED" || "CREATE_ENI_FAILED" || "DELETE_ENI_FAILED" || "SUBNET_NOT_FOUND" || "SECURITY_GROUP_NOT_FOUND" || "ENI_ACCESS_DENIED" || "SUBNET_ACCESS_DENIED" || "SECURITY_GROUP_ACCESS_DENIED" || "UNKNOWN_ERROR", // required * // Details: "STRING_VALUE", // required * // }, * // DeliveryStreamEncryptionConfiguration: { // DeliveryStreamEncryptionConfiguration @@ -66,11 +66,11 @@ export interface DescribeDeliveryStreamCommandOutput extends DescribeDeliveryStr * // KeyType: "AWS_OWNED_CMK" || "CUSTOMER_MANAGED_CMK", * // Status: "ENABLED" || "ENABLING" || "ENABLING_FAILED" || "DISABLED" || "DISABLING" || "DISABLING_FAILED", * // FailureDescription: { - * // Type: "RETIRE_KMS_GRANT_FAILED" || "CREATE_KMS_GRANT_FAILED" || "KMS_ACCESS_DENIED" || "DISABLED_KMS_KEY" || "INVALID_KMS_KEY" || "KMS_KEY_NOT_FOUND" || "KMS_OPT_IN_REQUIRED" || "CREATE_ENI_FAILED" || "DELETE_ENI_FAILED" || "SUBNET_NOT_FOUND" || "SECURITY_GROUP_NOT_FOUND" || "ENI_ACCESS_DENIED" || "SUBNET_ACCESS_DENIED" || "SECURITY_GROUP_ACCESS_DENIED" || "UNKNOWN_ERROR", // required + * // Type: "VPC_ENDPOINT_SERVICE_NAME_NOT_FOUND" || "VPC_INTERFACE_ENDPOINT_SERVICE_ACCESS_DENIED" || "RETIRE_KMS_GRANT_FAILED" || "CREATE_KMS_GRANT_FAILED" || "KMS_ACCESS_DENIED" || "DISABLED_KMS_KEY" || "INVALID_KMS_KEY" || "KMS_KEY_NOT_FOUND" || "KMS_OPT_IN_REQUIRED" || "CREATE_ENI_FAILED" || "DELETE_ENI_FAILED" || "SUBNET_NOT_FOUND" || "SECURITY_GROUP_NOT_FOUND" || "ENI_ACCESS_DENIED" || "SUBNET_ACCESS_DENIED" || "SECURITY_GROUP_ACCESS_DENIED" || "UNKNOWN_ERROR", // required * // Details: "STRING_VALUE", // required * // }, * // }, - * // DeliveryStreamType: "DirectPut" || "KinesisStreamAsSource" || "MSKAsSource", // required + * // DeliveryStreamType: "DirectPut" || "KinesisStreamAsSource" || "MSKAsSource" || "DatabaseAsSource", // required * // VersionId: "STRING_VALUE", // required * // CreateTimestamp: new Date("TIMESTAMP"), * // LastUpdateTimestamp: new Date("TIMESTAMP"), @@ -90,6 +90,60 @@ export interface DescribeDeliveryStreamCommandOutput extends DescribeDeliveryStr * // DeliveryStartTimestamp: new Date("TIMESTAMP"), * // ReadFromTimestamp: new Date("TIMESTAMP"), * // }, + * // DatabaseSourceDescription: { // DatabaseSourceDescription + * // Type: "MySQL" || "PostgreSQL", + * // Endpoint: "STRING_VALUE", + * // Port: Number("int"), + * // SSLMode: "Disabled" || "Enabled", + * // Databases: { // DatabaseList + * // Include: [ // DatabaseIncludeOrExcludeList + * // "STRING_VALUE", + * // ], + * // Exclude: [ + * // "STRING_VALUE", + * // ], + * // }, + * // Tables: { // DatabaseTableList + 
* // Include: [ // DatabaseTableIncludeOrExcludeList + * // "STRING_VALUE", + * // ], + * // Exclude: [ + * // "STRING_VALUE", + * // ], + * // }, + * // Columns: { // DatabaseColumnList + * // Include: [ // DatabaseColumnIncludeOrExcludeList + * // "STRING_VALUE", + * // ], + * // Exclude: [ + * // "STRING_VALUE", + * // ], + * // }, + * // SurrogateKeys: [ + * // "STRING_VALUE", + * // ], + * // SnapshotWatermarkTable: "STRING_VALUE", + * // SnapshotInfo: [ // DatabaseSnapshotInfoList + * // { // DatabaseSnapshotInfo + * // Id: "STRING_VALUE", // required + * // Table: "STRING_VALUE", // required + * // RequestTimestamp: new Date("TIMESTAMP"), // required + * // RequestedBy: "USER" || "FIREHOSE", // required + * // Status: "IN_PROGRESS" || "COMPLETE" || "SUSPENDED", // required + * // FailureDescription: "", + * // }, + * // ], + * // DatabaseSourceAuthenticationConfiguration: { // DatabaseSourceAuthenticationConfiguration + * // SecretsManagerConfiguration: { // SecretsManagerConfiguration + * // SecretARN: "STRING_VALUE", + * // RoleARN: "STRING_VALUE", + * // Enabled: true || false, // required + * // }, + * // }, + * // DatabaseSourceVPCConfiguration: { // DatabaseSourceVPCConfiguration + * // VpcEndpointServiceName: "STRING_VALUE", // required + * // }, + * // }, * // }, * // Destinations: [ // DestinationDescriptionList // required * // { // DestinationDescription @@ -308,7 +362,7 @@ export interface DescribeDeliveryStreamCommandOutput extends DescribeDeliveryStr * // }, * // }, * // CloudWatchLoggingOptions: "", - * // SecretsManagerConfiguration: { // SecretsManagerConfiguration + * // SecretsManagerConfiguration: { * // SecretARN: "STRING_VALUE", * // RoleARN: "STRING_VALUE", * // Enabled: true || false, // required @@ -548,9 +602,22 @@ export interface DescribeDeliveryStreamCommandOutput extends DescribeDeliveryStr * // UniqueKeys: [ * // "STRING_VALUE", * // ], + * // PartitionSpec: { // PartitionSpec + * // Identity: [ // PartitionFields + * // { // PartitionField + * // SourceName: "STRING_VALUE", // required + * // }, + * // ], + * // }, * // S3ErrorOutputPrefix: "STRING_VALUE", * // }, * // ], + * // SchemaEvolutionConfiguration: { // SchemaEvolutionConfiguration + * // Enabled: true || false, // required + * // }, + * // TableCreationConfiguration: { // TableCreationConfiguration + * // Enabled: true || false, // required + * // }, * // BufferingHints: "", * // CloudWatchLoggingOptions: "", * // ProcessingConfiguration: "", @@ -561,6 +628,7 @@ export interface DescribeDeliveryStreamCommandOutput extends DescribeDeliveryStr * // RoleARN: "STRING_VALUE", * // CatalogConfiguration: { // CatalogConfiguration * // CatalogARN: "STRING_VALUE", + * // WarehouseLocation: "STRING_VALUE", * // }, * // S3DestinationDescription: "", * // }, diff --git a/clients/client-firehose/src/commands/ListDeliveryStreamsCommand.ts b/clients/client-firehose/src/commands/ListDeliveryStreamsCommand.ts index be4786da713e..9c9b636c8ef4 100644 --- a/clients/client-firehose/src/commands/ListDeliveryStreamsCommand.ts +++ b/clients/client-firehose/src/commands/ListDeliveryStreamsCommand.ts @@ -28,14 +28,14 @@ export interface ListDeliveryStreamsCommandInput extends ListDeliveryStreamsInpu export interface ListDeliveryStreamsCommandOutput extends ListDeliveryStreamsOutput, __MetadataBearer {} /** - *

Lists your delivery streams in alphabetical order of their names.

- *

The number of delivery streams might be too large to return using a single call to - * ListDeliveryStreams. You can limit the number of delivery streams returned, + *

Lists your Firehose streams in alphabetical order of their names.

+ *

The number of Firehose streams might be too large to return using a single call to + * ListDeliveryStreams. You can limit the number of Firehose streams returned, * using the Limit parameter. To determine whether there are more delivery * streams to list, check the value of HasMoreDeliveryStreams in the output. If - * there are more delivery streams to list, you can request them by calling this operation + * there are more Firehose streams to list, you can request them by calling this operation * again and setting the ExclusiveStartDeliveryStreamName parameter to the name - * of the last delivery stream returned in the last call.

+ * of the last Firehose stream returned in the last call.
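A pagination sketch using those three fields:

```typescript
import { FirehoseClient, ListDeliveryStreamsCommand } from "@aws-sdk/client-firehose";

const client = new FirehoseClient({});
const names: string[] = [];
let hasMore = true;
let exclusiveStartName: string | undefined;

while (hasMore) {
  const page = await client.send(
    new ListDeliveryStreamsCommand({
      Limit: 10,
      ExclusiveStartDeliveryStreamName: exclusiveStartName,
    })
  );
  names.push(...(page.DeliveryStreamNames ?? []));
  hasMore = page.HasMoreDeliveryStreams === true;
  // Resume after the last name returned in this call.
  exclusiveStartName = names[names.length - 1];
}
```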

* @example * Use a bare-bones client and the command you need to make an API call. * ```javascript @@ -44,7 +44,7 @@ export interface ListDeliveryStreamsCommandOutput extends ListDeliveryStreamsOut * const client = new FirehoseClient(config); * const input = { // ListDeliveryStreamsInput * Limit: Number("int"), - * DeliveryStreamType: "DirectPut" || "KinesisStreamAsSource" || "MSKAsSource", + * DeliveryStreamType: "DirectPut" || "KinesisStreamAsSource" || "MSKAsSource" || "DatabaseAsSource", * ExclusiveStartDeliveryStreamName: "STRING_VALUE", * }; * const command = new ListDeliveryStreamsCommand(input); diff --git a/clients/client-firehose/src/commands/ListTagsForDeliveryStreamCommand.ts b/clients/client-firehose/src/commands/ListTagsForDeliveryStreamCommand.ts index a76db5a031ca..03f4af387749 100644 --- a/clients/client-firehose/src/commands/ListTagsForDeliveryStreamCommand.ts +++ b/clients/client-firehose/src/commands/ListTagsForDeliveryStreamCommand.ts @@ -28,7 +28,7 @@ export interface ListTagsForDeliveryStreamCommandInput extends ListTagsForDelive export interface ListTagsForDeliveryStreamCommandOutput extends ListTagsForDeliveryStreamOutput, __MetadataBearer {} /** - *

Lists the tags for the specified delivery stream. This operation has a limit of five + *

Lists the tags for the specified Firehose stream. This operation has a limit of five * transactions per second per account.
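A minimal call sketch (the stream name is a placeholder):

```typescript
import { FirehoseClient, ListTagsForDeliveryStreamCommand } from "@aws-sdk/client-firehose";

const client = new FirehoseClient({});
const { Tags } = await client.send(
  new ListTagsForDeliveryStreamCommand({ DeliveryStreamName: "example-stream" })
);
console.log(Tags); // e.g. [{ Key: "env", Value: "prod" }]
```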

* @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-firehose/src/commands/PutRecordBatchCommand.ts b/clients/client-firehose/src/commands/PutRecordBatchCommand.ts index 2703733f801a..0ecf6dbe6267 100644 --- a/clients/client-firehose/src/commands/PutRecordBatchCommand.ts +++ b/clients/client-firehose/src/commands/PutRecordBatchCommand.ts @@ -28,19 +28,24 @@ export interface PutRecordBatchCommandInput extends PutRecordBatchInput {} export interface PutRecordBatchCommandOutput extends PutRecordBatchOutput, __MetadataBearer {} /** - *

Writes multiple data records into a delivery stream in a single call, which can + *

Writes multiple data records into a Firehose stream in a single call, which can * achieve higher throughput per producer than when writing single records. To write single - * data records into a delivery stream, use PutRecord. Applications using + * data records into a Firehose stream, use PutRecord. Applications using * these operations are referred to as producers.

- *

Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a delivery stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics.

+ *

Firehose accumulates and publishes a particular metric for a customer account in one-minute intervals. It is possible that bursts of incoming bytes/records ingested to a Firehose stream last only a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's one-minute CloudWatch metrics.

*

For information about service quota, see Amazon Firehose * Quota.

*

Each PutRecordBatch request supports up to 500 records. Each record * in the request can be as large as 1,000 KB (before base64 encoding), up to a limit of 4 MB * for the entire request. These limits cannot be changed.
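A sketch that stays within those limits and re-checks the per-record results; the names and payloads are invented, and the trailing newline anticipates the delimiter advice below:

```typescript
import { FirehoseClient, PutRecordBatchCommand } from "@aws-sdk/client-firehose";

const client = new FirehoseClient({});
const encoder = new TextEncoder();

// Up to 500 records and 4 MB total per request.
const records = ["event-1", "event-2"].map((line) => ({
  Data: encoder.encode(line + "\n"),
}));

const output = await client.send(
  new PutRecordBatchCommand({ DeliveryStreamName: "example-stream", Records: records })
);

// A non-zero FailedPutCount means some records must be resent.
if ((output.FailedPutCount ?? 0) > 0) {
  const failed = records.filter((_, i) => output.RequestResponses?.[i]?.ErrorCode);
  // Re-send `failed` with backoff to avoid duplicating the whole batch.
}
```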

- *

You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 + *

You must specify the name of the Firehose stream and the data record when using PutRecordBatch. The data record consists of a data blob that can be up to 1,000 * KB in size, and any kind of data. For example, it could be a segment from a log file, * geographic location data, website clickstream data, and so on.

+ *

For multi-record de-aggregation, you cannot put more than 500 records even if the + * data blob length is less than 1,000 KiB. If you include more than 500 records, the request + * succeeds, but the record de-aggregation doesn't work as expected and the transformation + * Lambda is invoked with the complete base64-encoded data blob instead of de-aggregated, + * base64-decoded records.

*

Firehose buffers records before delivering them to the destination. To * disambiguate the data blobs at the destination, a common solution is to use delimiters in * the data, such as a newline (\n) or some other character unique within the @@ -70,12 +75,12 @@ export interface PutRecordBatchCommandOutput extends PutRecordBatchOutput, __Met * recommend that you handle any duplicates at the destination.

*

If PutRecordBatch throws ServiceUnavailableException, * the API is automatically reinvoked (retried) 3 times. If the exception persists, it is - * possible that the throughput limits have been exceeded for the delivery stream.

+ * possible that the throughput limits have been exceeded for the Firehose stream.
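Beyond those automatic service-side retries, the SDK's own retry budget is configurable on the client; a sketch with illustrative values:

```typescript
import { FirehoseClient } from "@aws-sdk/client-firehose";

// Raise the attempt count and let adaptive mode space retries out,
// rather than hammering a stream that is already throttling.
const client = new FirehoseClient({
  maxAttempts: 5,
  retryMode: "adaptive",
});
```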

*

Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can * result in data duplicates. For larger data assets, allow for a longer time out before * retrying Put API operations.

*

Data records sent to Firehose are stored for 24 hours from the time they - * are added to a delivery stream as it attempts to send the records to the destination. If + * are added to a Firehose stream as it attempts to send the records to the destination. If * the destination is unreachable for more than 24 hours, the data is no longer * available.

* @@ -123,7 +128,7 @@ export interface PutRecordBatchCommandOutput extends PutRecordBatchOutput, __Met * * @throws {@link InvalidKMSResourceException} (client fault) *

Firehose throws this exception when an attempt to put records or to start - * or stop delivery stream encryption fails. This happens when the KMS service throws one of + * or stop Firehose stream encryption fails. This happens when the KMS service throws one of * the following exception types: AccessDeniedException, * InvalidStateException, DisabledException, or * NotFoundException.

@@ -136,7 +141,7 @@ export interface PutRecordBatchCommandOutput extends PutRecordBatchOutput, __Met * * @throws {@link ServiceUnavailableException} (server fault) *

The service is unavailable. Back off and retry the operation. If you continue to see - * the exception, throughput limits for the delivery stream may have been exceeded. For more + * the exception, throughput limits for the Firehose stream may have been exceeded. For more * information about limits and how to request an increase, see Amazon Firehose * Limits.

* diff --git a/clients/client-firehose/src/commands/PutRecordCommand.ts b/clients/client-firehose/src/commands/PutRecordCommand.ts index 2ff6f08be691..9304be438039 100644 --- a/clients/client-firehose/src/commands/PutRecordCommand.ts +++ b/clients/client-firehose/src/commands/PutRecordCommand.ts @@ -28,19 +28,24 @@ export interface PutRecordCommandInput extends PutRecordInput {} export interface PutRecordCommandOutput extends PutRecordOutput, __MetadataBearer {} /** - *

Writes a single data record into an Amazon Firehose delivery stream. To - * write multiple data records into a delivery stream, use PutRecordBatch. + *

Writes a single data record into a Firehose stream. To + * write multiple data records into a Firehose stream, use PutRecordBatch. * Applications using these operations are referred to as producers.

- *

By default, each delivery stream can take in up to 2,000 transactions per second, + *

By default, each Firehose stream can take in up to 2,000 transactions per second, * 5,000 records per second, or 5 MB per second. If you use PutRecord and * PutRecordBatch, the limits are an aggregate across these two - * operations for each delivery stream. For more information about limits and how to request + * operations for each Firehose stream. For more information about limits and how to request * an increase, see Amazon * Firehose Limits.

- *

Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a delivery stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics.

- *

You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 + *

Firehose accumulates and publishes a particular metric for a customer account in one-minute intervals. It is possible that bursts of incoming bytes/records ingested to a Firehose stream last only a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's one-minute CloudWatch metrics.

+ *

You must specify the name of the Firehose stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 * KiB in size, and any kind of data. For example, it can be a segment from a log file, * geographic location data, website clickstream data, and so on.
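A single-record sketch (the stream name and payload are invented; the trailing newline anticipates the delimiter advice below):

```typescript
import { FirehoseClient, PutRecordCommand } from "@aws-sdk/client-firehose";

const client = new FirehoseClient({});

// Any blob up to 1,000 KiB; JSON lines are a common choice.
const data = new TextEncoder().encode(JSON.stringify({ level: "info", msg: "hello" }) + "\n");

const { RecordId, Encrypted } = await client.send(
  new PutRecordCommand({
    DeliveryStreamName: "example-stream",
    Record: { Data: data },
  })
);
```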

+ *

For multi-record de-aggregation, you cannot put more than 500 records even if the + * data blob length is less than 1,000 KiB. If you include more than 500 records, the request + * succeeds, but the record de-aggregation doesn't work as expected and the transformation + * Lambda is invoked with the complete base64-encoded data blob instead of de-aggregated, + * base64-decoded records.

*

Firehose buffers records before delivering them to the destination. To * disambiguate the data blobs at the destination, a common solution is to use delimiters in * the data, such as a newline (\n) or some other character unique within the @@ -52,12 +57,12 @@ export interface PutRecordCommandOutput extends PutRecordOutput, __MetadataBeare *

If the PutRecord operation throws a * ServiceUnavailableException, the API is automatically reinvoked (retried) 3 * times. If the exception persists, it is possible that the throughput limits have been - * exceeded for the delivery stream.

+ * exceeded for the Firehose stream.

*

Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can * result in data duplicates. For larger data assets, allow for a longer time out before * retrying Put API operations.

*

Data records sent to Firehose are stored for 24 hours from the time they - * are added to a delivery stream as it tries to send the records to the destination. If the + * are added to a Firehose stream as it tries to send the records to the destination. If the * destination is unreachable for more than 24 hours, the data is no longer * available.

* @@ -96,7 +101,7 @@ export interface PutRecordCommandOutput extends PutRecordOutput, __MetadataBeare * * @throws {@link InvalidKMSResourceException} (client fault) *

Firehose throws this exception when an attempt to put records or to start - * or stop delivery stream encryption fails. This happens when the KMS service throws one of + * or stop Firehose stream encryption fails. This happens when the KMS service throws one of * the following exception types: AccessDeniedException, * InvalidStateException, DisabledException, or * NotFoundException.

@@ -109,7 +114,7 @@ export interface PutRecordCommandOutput extends PutRecordOutput, __MetadataBeare * * @throws {@link ServiceUnavailableException} (server fault) *

The service is unavailable. Back off and retry the operation. If you continue to see - * the exception, throughput limits for the delivery stream may have been exceeded. For more + * the exception, throughput limits for the Firehose stream may have been exceeded. For more * information about limits and how to request an increase, see Amazon Firehose * Limits.

* diff --git a/clients/client-firehose/src/commands/StartDeliveryStreamEncryptionCommand.ts b/clients/client-firehose/src/commands/StartDeliveryStreamEncryptionCommand.ts index cddca96e110d..ed5d43c6b471 100644 --- a/clients/client-firehose/src/commands/StartDeliveryStreamEncryptionCommand.ts +++ b/clients/client-firehose/src/commands/StartDeliveryStreamEncryptionCommand.ts @@ -33,19 +33,19 @@ export interface StartDeliveryStreamEncryptionCommandOutput __MetadataBearer {} /** - *

Enables server-side encryption (SSE) for the delivery stream.

+ *

Enables server-side encryption (SSE) for the Firehose stream.

*

This operation is asynchronous. It returns immediately. When you invoke it, Firehose first sets the encryption status of the stream to ENABLING, and then - * to ENABLED. The encryption status of a delivery stream is the + * to ENABLED. The encryption status of a Firehose stream is the * Status property in DeliveryStreamEncryptionConfiguration. * If the operation fails, the encryption status changes to ENABLING_FAILED. You - * can continue to read and write data to your delivery stream while the encryption status is + * can continue to read and write data to your Firehose stream while the encryption status is * ENABLING, but the data is not encrypted. It can take up to 5 seconds after * the encryption status changes to ENABLED before all records written to the - * delivery stream are encrypted. To find out whether a record or a batch of records was + * Firehose stream are encrypted. To find out whether a record or a batch of records was * encrypted, check the response elements PutRecordOutput$Encrypted and * PutRecordBatchOutput$Encrypted, respectively.

- *

To check the encryption status of a delivery stream, use DescribeDeliveryStream.

- *

Even if encryption is currently enabled for a delivery stream, you can still invoke this + *

To check the encryption status of a Firehose stream, use DescribeDeliveryStream.

+ *

Even if encryption is currently enabled for a Firehose stream, you can still invoke this * operation on it to change the ARN of the CMK or both its type and ARN. If you invoke this * method to change the CMK, and the old CMK is of type CUSTOMER_MANAGED_CMK, * Firehose schedules the grant it had on the old CMK for retirement. If the new @@ -55,21 +55,21 @@ export interface StartDeliveryStreamEncryptionCommandOutput *

For the KMS grant creation to be successful, the Firehose API operations * StartDeliveryStreamEncryption and CreateDeliveryStream should * not be called with session credentials that are more than 6 hours old.

- *

If a delivery stream already has encryption enabled and then you invoke this operation + *

If a Firehose stream already has encryption enabled and then you invoke this operation * to change the ARN of the CMK or both its type and ARN and you get * ENABLING_FAILED, this only means that the attempt to change the CMK failed. * In this case, encryption remains enabled with the old CMK.

- *

If the encryption status of your delivery stream is ENABLING_FAILED, you + *

If the encryption status of your Firehose stream is ENABLING_FAILED, you * can invoke this operation again with a valid CMK. The CMK must be enabled and the key * policy mustn't explicitly deny the permission for Firehose to invoke KMS * encrypt and decrypt operations.

- *

You can enable SSE for a delivery stream only if it's a delivery stream that uses + *

You can enable SSE for a Firehose stream only if it's a Firehose stream that uses * DirectPut as its source.

*

The StartDeliveryStreamEncryption and * StopDeliveryStreamEncryption operations have a combined limit of 25 calls - * per delivery stream per 24 hours. For example, you reach the limit if you call + * per Firehose stream per 24 hours. For example, you reach the limit if you call * StartDeliveryStreamEncryption 13 times and - * StopDeliveryStreamEncryption 12 times for the same delivery stream in a + * StopDeliveryStreamEncryption 12 times for the same Firehose stream in a * 24-hour period.
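A sketch of switching a DirectPut stream to a customer managed CMK (the key ARN is a placeholder):

```typescript
import { FirehoseClient, StartDeliveryStreamEncryptionCommand } from "@aws-sdk/client-firehose";

const client = new FirehoseClient({});

await client.send(
  new StartDeliveryStreamEncryptionCommand({
    DeliveryStreamName: "example-stream",
    DeliveryStreamEncryptionConfigurationInput: {
      KeyType: "CUSTOMER_MANAGED_CMK",
      KeyARN: "arn:aws:kms:us-east-1:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab",
    },
  })
);
// The call returns immediately; watch Status via DescribeDeliveryStream.
```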

* @example * Use a bare-bones client and the command you need to make an API call. @@ -101,7 +101,7 @@ export interface StartDeliveryStreamEncryptionCommandOutput * * @throws {@link InvalidKMSResourceException} (client fault) *

Firehose throws this exception when an attempt to put records or to start - * or stop delivery stream encryption fails. This happens when the KMS service throws one of + * or stop Firehose stream encryption fails. This happens when the KMS service throws one of * the following exception types: AccessDeniedException, * InvalidStateException, DisabledException, or * NotFoundException.

diff --git a/clients/client-firehose/src/commands/StopDeliveryStreamEncryptionCommand.ts b/clients/client-firehose/src/commands/StopDeliveryStreamEncryptionCommand.ts index 9ec8e02068e7..16de0b1c0960 100644 --- a/clients/client-firehose/src/commands/StopDeliveryStreamEncryptionCommand.ts +++ b/clients/client-firehose/src/commands/StopDeliveryStreamEncryptionCommand.ts @@ -33,24 +33,24 @@ export interface StopDeliveryStreamEncryptionCommandOutput __MetadataBearer {} /** - *

Disables server-side encryption (SSE) for the delivery stream.

+ *

Disables server-side encryption (SSE) for the Firehose stream.

*

This operation is asynchronous. It returns immediately. When you invoke it, Firehose first sets the encryption status of the stream to DISABLING, and then * to DISABLED. You can continue to read and write data to your stream while its * status is DISABLING. It can take up to 5 seconds after the encryption status - * changes to DISABLED before all records written to the delivery stream are no + * changes to DISABLED before all records written to the Firehose stream are no * longer subject to encryption. To find out whether a record or a batch of records was * encrypted, check the response elements PutRecordOutput$Encrypted and * PutRecordBatchOutput$Encrypted, respectively.

- *

To check the encryption state of a delivery stream, use DescribeDeliveryStream.

+ *

To check the encryption state of a Firehose stream, use DescribeDeliveryStream.

*

If SSE is enabled using a customer managed CMK and then you invoke * StopDeliveryStreamEncryption, Firehose schedules the related * KMS grant for retirement and then retires it after it ensures that it is finished * delivering records to the destination.

*

The StartDeliveryStreamEncryption and * StopDeliveryStreamEncryption operations have a combined limit of 25 calls - * per delivery stream per 24 hours. For example, you reach the limit if you call + * per Firehose stream per 24 hours. For example, you reach the limit if you call * StartDeliveryStreamEncryption 13 times and - * StopDeliveryStreamEncryption 12 times for the same delivery stream in a + * StopDeliveryStreamEncryption 12 times for the same Firehose stream in a * 24-hour period.

* @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-firehose/src/commands/TagDeliveryStreamCommand.ts b/clients/client-firehose/src/commands/TagDeliveryStreamCommand.ts index c53b62d12fb7..8839d5ee1583 100644 --- a/clients/client-firehose/src/commands/TagDeliveryStreamCommand.ts +++ b/clients/client-firehose/src/commands/TagDeliveryStreamCommand.ts @@ -28,15 +28,15 @@ export interface TagDeliveryStreamCommandInput extends TagDeliveryStreamInput {} export interface TagDeliveryStreamCommandOutput extends TagDeliveryStreamOutput, __MetadataBearer {} /** - *

Adds or updates tags for the specified delivery stream. A tag is a key-value pair + *

Adds or updates tags for the specified Firehose stream. A tag is a key-value pair * that you can define and assign to Amazon Web Services resources. If you specify a tag that * already exists, the tag value is replaced with the value that you specify in the request. * Tags are metadata. For example, you can add friendly names and descriptions or other types - * of information that can help you distinguish the delivery stream. For more information + * of information that can help you distinguish the Firehose stream. For more information * about tags, see Using Cost Allocation * Tags in the Amazon Web Services Billing and Cost Management User * Guide.

- *

Each delivery stream can have up to 50 tags.

+ *

Each Firehose stream can have up to 50 tags.

*

This operation has a limit of five transactions per second per account.
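A sketch that stays within those limits (the names and tags are invented):

```typescript
import { FirehoseClient, TagDeliveryStreamCommand } from "@aws-sdk/client-firehose";

const client = new FirehoseClient({});
await client.send(
  new TagDeliveryStreamCommand({
    DeliveryStreamName: "example-stream",
    Tags: [
      { Key: "team", Value: "analytics" },
      { Key: "env", Value: "prod" }, // re-specifying a key replaces its value
    ],
  })
);
```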

* @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-firehose/src/commands/UntagDeliveryStreamCommand.ts b/clients/client-firehose/src/commands/UntagDeliveryStreamCommand.ts index f20babcd9fa7..9bfcd40d738c 100644 --- a/clients/client-firehose/src/commands/UntagDeliveryStreamCommand.ts +++ b/clients/client-firehose/src/commands/UntagDeliveryStreamCommand.ts @@ -28,7 +28,7 @@ export interface UntagDeliveryStreamCommandInput extends UntagDeliveryStreamInpu export interface UntagDeliveryStreamCommandOutput extends UntagDeliveryStreamOutput, __MetadataBearer {} /** - *

Removes tags from the specified delivery stream. Removed tags are deleted, and you + *

Removes tags from the specified Firehose stream. Removed tags are deleted, and you * can't recover them after this operation successfully completes.

*

If you specify a tag that doesn't exist, the operation ignores it.

*

This operation has a limit of five transactions per second per account.
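A matching removal sketch:

```typescript
import { FirehoseClient, UntagDeliveryStreamCommand } from "@aws-sdk/client-firehose";

const client = new FirehoseClient({});
await client.send(
  new UntagDeliveryStreamCommand({
    DeliveryStreamName: "example-stream",
    TagKeys: ["env"], // keys that don't exist are ignored
  })
);
```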

diff --git a/clients/client-firehose/src/commands/UpdateDestinationCommand.ts b/clients/client-firehose/src/commands/UpdateDestinationCommand.ts index 116db9006b14..0ddd92e396f6 100644 --- a/clients/client-firehose/src/commands/UpdateDestinationCommand.ts +++ b/clients/client-firehose/src/commands/UpdateDestinationCommand.ts @@ -32,12 +32,12 @@ export interface UpdateDestinationCommandInput extends UpdateDestinationInput {} export interface UpdateDestinationCommandOutput extends UpdateDestinationOutput, __MetadataBearer {} /** - *

Updates the specified destination of the specified delivery stream.

+ *

Updates the specified destination of the specified Firehose stream.

*

Use this operation to change the destination type (for example, to replace the Amazon * S3 destination with Amazon Redshift) or change the parameters associated with a destination * (for example, to change the bucket name of the Amazon S3 destination). The update might not - * occur immediately. The target delivery stream remains active while the configurations are - * updated, so data writes to the delivery stream can continue during this process. The + * occur immediately. The target Firehose stream remains active while the configurations are + * updated, so data writes to the Firehose stream can continue during this process. The * updated configurations are usually effective within a few minutes.
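Because the call needs the stream's current version ID and the destination ID, a common pattern is to read both from DescribeDeliveryStream first; a sketch (the bucket ARN is a placeholder):

```typescript
import {
  FirehoseClient,
  DescribeDeliveryStreamCommand,
  UpdateDestinationCommand,
} from "@aws-sdk/client-firehose";

const client = new FirehoseClient({});

const { DeliveryStreamDescription } = await client.send(
  new DescribeDeliveryStreamCommand({ DeliveryStreamName: "example-stream" })
);

await client.send(
  new UpdateDestinationCommand({
    DeliveryStreamName: "example-stream",
    CurrentDeliveryStreamVersionId: DeliveryStreamDescription!.VersionId!,
    DestinationId: DeliveryStreamDescription!.Destinations![0].DestinationId!,
    ExtendedS3DestinationUpdate: {
      BucketARN: "arn:aws:s3:::example-new-bucket",
    },
  })
);
```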

*

Switching between Amazon OpenSearch Service and other services is not supported. For * an Amazon OpenSearch Service destination, you can only update to another Amazon OpenSearch @@ -487,9 +487,22 @@ export interface UpdateDestinationCommandOutput extends UpdateDestinationOutput, * UniqueKeys: [ * "STRING_VALUE", * ], + * PartitionSpec: { // PartitionSpec + * Identity: [ // PartitionFields + * { // PartitionField + * SourceName: "STRING_VALUE", // required + * }, + * ], + * }, * S3ErrorOutputPrefix: "STRING_VALUE", * }, * ], + * SchemaEvolutionConfiguration: { // SchemaEvolutionConfiguration + * Enabled: true || false, // required + * }, + * TableCreationConfiguration: { // TableCreationConfiguration + * Enabled: true || false, // required + * }, * BufferingHints: "", * CloudWatchLoggingOptions: "", * ProcessingConfiguration: "", @@ -500,6 +513,7 @@ export interface UpdateDestinationCommandOutput extends UpdateDestinationOutput, * RoleARN: "STRING_VALUE", * CatalogConfiguration: { // CatalogConfiguration * CatalogARN: "STRING_VALUE", + * WarehouseLocation: "STRING_VALUE", * }, * S3Configuration: { // S3DestinationConfiguration * RoleARN: "STRING_VALUE", // required diff --git a/clients/client-firehose/src/models/models_0.ts b/clients/client-firehose/src/models/models_0.ts index a086121264b3..f46e44bc791c 100644 --- a/clients/client-firehose/src/models/models_0.ts +++ b/clients/client-firehose/src/models/models_0.ts @@ -20,7 +20,7 @@ export interface AmazonOpenSearchServerlessBufferingHints { *

Buffer incoming data to the specified size, in MBs, before delivering it to the * destination. The default value is 5.

*

We recommend setting this parameter to a value greater than the amount of data you - * typically ingest into the delivery stream in 10 seconds. For example, if you typically + * typically ingest into the Firehose stream in 10 seconds. For example, if you typically * ingest data at 1 MB/sec, the value should be 10 MB or higher.

* @public */ @@ -28,7 +28,7 @@ export interface AmazonOpenSearchServerlessBufferingHints { } /** - *

Describes the Amazon CloudWatch logging options for your delivery stream.

+ *

Describes the Amazon CloudWatch logging options for your Firehose stream.

* @public */ export interface CloudWatchLoggingOptions { @@ -202,7 +202,7 @@ export interface BufferingHints { * for it, you must also specify a value for IntervalInSeconds, and vice * versa.

*

We recommend setting this parameter to a value greater than the amount of data you - * typically ingest into the delivery stream in 10 seconds. For example, if you typically + * typically ingest into the Firehose stream in 10 seconds. For example, if you typically * ingest data at 1 MiB/sec, the value should be 10 MiB or higher.
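Applying that rule of thumb to a stream ingesting about 1 MiB/sec (illustrative numbers only):

```typescript
import type { BufferingHints } from "@aws-sdk/client-firehose";

// 1 MiB/sec of typical ingest × 10 seconds ≈ 10 MiB, so buffer at least that much.
const hints: BufferingHints = {
  SizeInMBs: 10,
  IntervalInSeconds: 300,
};
```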

* @public */ @@ -342,7 +342,7 @@ export interface S3DestinationConfiguration { EncryptionConfiguration?: EncryptionConfiguration; /** - *

The CloudWatch logging options for your delivery stream.

+ *

The CloudWatch logging options for your Firehose stream.

* @public */ CloudWatchLoggingOptions?: CloudWatchLoggingOptions; @@ -365,7 +365,7 @@ export interface VpcConfiguration { * scales up and down automatically based on throughput. To enable Firehose to * scale up the number of ENIs to match throughput, ensure that you have sufficient quota. To * help you calculate the quota you need, assume that Firehose can create up to - * three ENIs for this delivery stream for each of the subnets specified here. For more + * three ENIs for this Firehose stream for each of the subnets specified here. For more * information about ENI quota, see Network Interfaces * in the Amazon VPC Quotas topic.

* @public @@ -373,7 +373,7 @@ export interface VpcConfiguration { SubnetIds: string[] | undefined; /** - *

The ARN of the IAM role that you want the delivery stream to use to create endpoints in + *

The ARN of the IAM role that you want the Firehose stream to use to create endpoints in * the destination VPC. You can use your existing Firehose delivery role or you * can specify a new role. In either case, make sure that the role trusts the Firehose service principal and that it grants the following permissions:

*
    @@ -506,7 +506,7 @@ export interface AmazonOpenSearchServerlessDestinationConfiguration { ProcessingConfiguration?: ProcessingConfiguration; /** - *

    Describes the Amazon CloudWatch logging options for your delivery stream.

    + *

    Describes the Amazon CloudWatch logging options for your Firehose stream.

    * @public */ CloudWatchLoggingOptions?: CloudWatchLoggingOptions; @@ -578,7 +578,7 @@ export interface S3DestinationDescription { EncryptionConfiguration: EncryptionConfiguration | undefined; /** - *

    The Amazon CloudWatch logging options for your delivery stream.

    + *

    The Amazon CloudWatch logging options for your Firehose stream.

    * @public */ CloudWatchLoggingOptions?: CloudWatchLoggingOptions; @@ -599,7 +599,7 @@ export interface VpcConfigurationDescription { * scales up and down automatically based on throughput. To enable Firehose to * scale up the number of ENIs to match throughput, ensure that you have sufficient quota. To * help you calculate the quota you need, assume that Firehose can create up to - * three ENIs for this delivery stream for each of the subnets specified here. For more + * three ENIs for this Firehose stream for each of the subnets specified here. For more * information about ENI quota, see Network Interfaces * in the Amazon VPC Quotas topic.

    * @public @@ -607,7 +607,7 @@ export interface VpcConfigurationDescription { SubnetIds: string[] | undefined; /** - *

    The ARN of the IAM role that the delivery stream uses to create endpoints in the + *

    The ARN of the IAM role that the Firehose stream uses to create endpoints in the * destination VPC. You can use your existing Firehose delivery role or you can * specify a new role. In either case, make sure that the role trusts the Firehose service principal and that it grants the following permissions:

    *
      @@ -652,7 +652,7 @@ export interface VpcConfigurationDescription { *

      * *
    - *

    If you revoke these permissions after you create the delivery stream, Firehose can't scale out by creating more ENIs when necessary. You might therefore see a + *

    If you revoke these permissions after you create the Firehose stream, Firehose can't scale out by creating more ENIs when necessary. You might therefore see a * degradation in performance.

    * @public */ @@ -664,7 +664,7 @@ export interface VpcConfigurationDescription { * ES domain uses or different ones. If you specify different security groups, ensure that * they allow outbound HTTPS traffic to the Amazon ES domain's security group. Also ensure * that the Amazon ES domain's security group allows HTTPS traffic from the security groups - * specified here. If you use the same security group for both your delivery stream and the + * specified here. If you use the same security group for both your Firehose stream and the * Amazon ES domain, make sure the security group inbound rule allows HTTPS traffic. For more * information about security group rules, see Security group * rules in the Amazon VPC documentation.

    @@ -735,7 +735,7 @@ export interface AmazonOpenSearchServerlessDestinationDescription { ProcessingConfiguration?: ProcessingConfiguration; /** - *

    Describes the Amazon CloudWatch logging options for your delivery stream.

    + *

    Describes the Amazon CloudWatch logging options for your Firehose stream.

    * @public */ CloudWatchLoggingOptions?: CloudWatchLoggingOptions; @@ -809,7 +809,7 @@ export interface S3DestinationUpdate { EncryptionConfiguration?: EncryptionConfiguration; /** - *

    The CloudWatch logging options for your delivery stream.

    + *

    The CloudWatch logging options for your Firehose stream.

    * @public */ CloudWatchLoggingOptions?: CloudWatchLoggingOptions; @@ -870,7 +870,7 @@ export interface AmazonOpenSearchServerlessDestinationUpdate { ProcessingConfiguration?: ProcessingConfiguration; /** - *

    Describes the Amazon CloudWatch logging options for your delivery stream.

    + *

    Describes the Amazon CloudWatch logging options for your Firehose stream.

    * @public */ CloudWatchLoggingOptions?: CloudWatchLoggingOptions; @@ -893,7 +893,7 @@ export interface AmazonopensearchserviceBufferingHints { *

    Buffer incoming data to the specified size, in MBs, before delivering it to the * destination. The default value is 5.

    *

    We recommend setting this parameter to a value greater than the amount of data you - * typically ingest into the delivery stream in 10 seconds. For example, if you typically + * typically ingest into the Firehose stream in 10 seconds. For example, if you typically * ingest data at 1 MB/sec, the value should be 10 MB or higher.

    * @public */ @@ -1073,7 +1073,7 @@ export interface AmazonopensearchserviceDestinationConfiguration { ProcessingConfiguration?: ProcessingConfiguration; /** - *

    Describes the Amazon CloudWatch logging options for your delivery stream.

    + *

    Describes the Amazon CloudWatch logging options for your Firehose stream.

    * @public */ CloudWatchLoggingOptions?: CloudWatchLoggingOptions; @@ -1167,7 +1167,7 @@ export interface AmazonopensearchserviceDestinationDescription { ProcessingConfiguration?: ProcessingConfiguration; /** - *

    Describes the Amazon CloudWatch logging options for your delivery stream.

    + *

    Describes the Amazon CloudWatch logging options for your Firehose stream.

    * @public */ CloudWatchLoggingOptions?: CloudWatchLoggingOptions; @@ -1223,9 +1223,9 @@ export interface AmazonopensearchserviceDestinationUpdate { *

    The Amazon OpenSearch Service type name. For Elasticsearch 6.x, there can be only one * type per index. If you try to specify a new type for an existing index that already has * another type, Firehose returns an error during runtime.

    - *

    If you upgrade Elasticsearch from 6.x to 7.x and don’t update your delivery stream, + *

    If you upgrade Elasticsearch from 6.x to 7.x and don’t update your Firehose stream, * Firehose still delivers data to Elasticsearch with the old index name and type - * name. If you want to update your delivery stream with a new index name, provide an empty + * name. If you want to update your Firehose stream with a new index name, provide an empty * string for TypeName.

    * @public */ @@ -1265,7 +1265,7 @@ export interface AmazonopensearchserviceDestinationUpdate { ProcessingConfiguration?: ProcessingConfiguration; /** - *

    Describes the Amazon CloudWatch logging options for your delivery stream.

    + *

    Describes the Amazon CloudWatch logging options for your Firehose stream.

    * @public */ CloudWatchLoggingOptions?: CloudWatchLoggingOptions; @@ -1313,18 +1313,24 @@ export interface AuthenticationConfiguration { *

    * Describes the containers where the destination Apache Iceberg Tables are persisted. *

    - *

    Amazon Data Firehose is in preview release and is subject to change.

    * @public */ export interface CatalogConfiguration { /** *

    - * Specifies the Glue catalog ARN indentifier of the destination Apache Iceberg Tables. You must specify the ARN in the format arn:aws:glue:region:account-id:catalog. + * Specifies the Glue catalog ARN identifier of the destination Apache Iceberg Tables. You must specify the ARN in the format arn:aws:glue:region:account-id:catalog. *

    - *

    Amazon Data Firehose is in preview release and is subject to change.

    * @public */ CatalogARN?: string; + + /** + *

    + *

    + *

    Amazon Data Firehose is in preview release and is subject to change.

    + * @public + */ + WarehouseLocation?: string; } /** @@ -1405,6 +1411,262 @@ export interface CopyCommand { CopyOptions?: string; } +/** + *

    + *

    + *

    Amazon Data Firehose is in preview release and is subject to change.

    + * @public + */ +export interface DatabaseColumnList { + /** + *

    + *

    + *

    Amazon Data Firehose is in preview release and is subject to change.

    + * @public + */ + Include?: string[]; + + /** + *

    + *

    + *

    Amazon Data Firehose is in preview release and is subject to change.

    + * @public + */ + Exclude?: string[]; +} + +/** + *

    + *

    + *

    Amazon Data Firehose is in preview release and is subject to change.

    + * @public + */ +export interface DatabaseList { + /** + *

    + *

    + *

    Amazon Data Firehose is in preview release and is subject to change.

    + * @public + */ + Include?: string[]; + + /** + *

    + *

    + *

    Amazon Data Firehose is in preview release and is subject to change.

    + * @public + */ + Exclude?: string[]; +} + +/** + *

    The structure that defines how Firehose accesses the secret.

    + * @public + */ +export interface SecretsManagerConfiguration { + /** + *

The ARN of the secret that stores your credentials. It must be in the same region as the + * Firehose stream and the role. The secret ARN can reside in a different account than the Firehose stream and role, because Firehose supports cross-account secret access. This parameter is required when Enabled is set to True.

    + * @public + */ + SecretARN?: string; + + /** + *

+ * Specifies the role that Firehose assumes when calling the Secrets Manager API operation. When you provide the role, it overrides any destination-specific role defined in the destination configuration. If you do not provide a role, Firehose uses the destination-specific role. This parameter is required for Splunk. + *

    + * @public + */ + RoleARN?: string; + + /** + *

Specifies whether you want to use the Secrets Manager feature. When set to + * True, the Secrets Manager configuration overwrites the existing secrets in + * the destination configuration. When it's set to False, Firehose falls back to + * the credentials in the destination configuration.
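Putting the three fields together (the ARNs are placeholders):

```typescript
import type { SecretsManagerConfiguration } from "@aws-sdk/client-firehose";

// With Enabled: true, the secret replaces any credentials embedded
// in the destination configuration.
const secretsConfig: SecretsManagerConfiguration = {
  Enabled: true,
  SecretARN: "arn:aws:secretsmanager:us-east-1:111122223333:secret:example-abc123",
  RoleARN: "arn:aws:iam::111122223333:role/example-firehose-role",
};
```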

    + * @public + */ + Enabled: boolean | undefined; +} + +/** + *

    + *

    + *

    Amazon Data Firehose is in preview release and is subject to change.

    + * @public + */ +export interface DatabaseSourceAuthenticationConfiguration { + /** + *

    The structure that defines how Firehose accesses the secret.

    + * @public + */ + SecretsManagerConfiguration: SecretsManagerConfiguration | undefined; +} + +/** + *

    + *

    + *

    Amazon Data Firehose is in preview release and is subject to change.

    + * @public + */ +export interface DatabaseSourceVPCConfiguration { + /** + *

    + *

    + *

    Amazon Data Firehose is in preview release and is subject to change.

    + * @public + */ + VpcEndpointServiceName: string | undefined; +} + +/** + * @public + * @enum + */ +export const SSLMode = { + Disabled: "Disabled", + Enabled: "Enabled", +} as const; + +/** + * @public + */ +export type SSLMode = (typeof SSLMode)[keyof typeof SSLMode]; + +/** + *

    + *

    + *

    Amazon Data Firehose is in preview release and is subject to change.

    + * @public + */ +export interface DatabaseTableList { + /** + *

    + *

    + *

    Amazon Data Firehose is in preview release and is subject to change.

    + * @public + */ + Include?: string[]; + + /** + *

    + *

    + *

    Amazon Data Firehose is in preview release and is subject to change.

    + * @public + */ + Exclude?: string[]; +} + +/** + * @public + * @enum + */ +export const DatabaseType = { + MySQL: "MySQL", + PostgreSQL: "PostgreSQL", +} as const; + +/** + * @public + */ +export type DatabaseType = (typeof DatabaseType)[keyof typeof DatabaseType]; + +/** + *

+
+/**
+ * Amazon Data Firehose is in preview release and is subject to change.
+ * @public
+ */
+export interface DatabaseSourceConfiguration {
+  /**
+   * Amazon Data Firehose is in preview release and is subject to change.
+   * @public
+   */
+  Type: DatabaseType | undefined;
+
+  /**
+   * Amazon Data Firehose is in preview release and is subject to change.
+   * @public
+   */
+  Endpoint: string | undefined;
+
+  /**
+   * Amazon Data Firehose is in preview release and is subject to change.
+   * @public
+   */
+  Port: number | undefined;
+
+  /**
+   * Amazon Data Firehose is in preview release and is subject to change.
+   * @public
+   */
+  SSLMode?: SSLMode;
+
+  /**
+   * Amazon Data Firehose is in preview release and is subject to change.
+   * @public
+   */
+  Databases: DatabaseList | undefined;
+
+  /**
+   * Amazon Data Firehose is in preview release and is subject to change.
+   * @public
+   */
+  Tables: DatabaseTableList | undefined;
+
+  /**
+   * Amazon Data Firehose is in preview release and is subject to change.
+   * @public
+   */
+  Columns?: DatabaseColumnList;
+
+  /**
+   * Amazon Data Firehose is in preview release and is subject to change.
+   * @public
+   */
+  SurrogateKeys?: string[];
+
+  /**
+   * Amazon Data Firehose is in preview release and is subject to change.
+   * @public
+   */
+  SnapshotWatermarkTable: string | undefined;
+
+  /**
+   * Amazon Data Firehose is in preview release and is subject to change.
+   * @public
+   */
+  DatabaseSourceAuthenticationConfiguration: DatabaseSourceAuthenticationConfiguration | undefined;
+
+  /**
+   * Amazon Data Firehose is in preview release and is subject to change.
+   * @public
+   */
+  DatabaseSourceVPCConfiguration: DatabaseSourceVPCConfiguration | undefined;
+}
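// A sketch of wiring the interface above for the new DatabaseAsSource stream
// type; the endpoint, names, and ARNs are illustrative placeholders.
import type { DatabaseSourceConfiguration } from "@aws-sdk/client-firehose";

const databaseSource: DatabaseSourceConfiguration = {
  Type: "MySQL", // or "PostgreSQL"
  Endpoint: "mydb.cluster-abc123.us-east-1.rds.amazonaws.com",
  Port: 3306,
  SSLMode: "Enabled",
  // Include/Exclude lists narrow which databases, tables, and columns are captured.
  Databases: { Include: ["orders"] },
  Tables: { Include: ["orders.line_items"] },
  Columns: { Exclude: ["orders.line_items.raw_payload"] },
  SurrogateKeys: ["id"],
  SnapshotWatermarkTable: "orders.firehose_watermarks",
  DatabaseSourceAuthenticationConfiguration: {
    SecretsManagerConfiguration: {
      Enabled: true,
      SecretARN: "arn:aws:secretsmanager:us-east-1:111122223333:secret:db-creds-AbCdEf",
    },
  },
  DatabaseSourceVPCConfiguration: {
    VpcEndpointServiceName: "com.amazonaws.vpce.us-east-1.vpce-svc-0123456789abcdef0",
  },
};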

 /**
  * @public
  * @enum
  */
@@ -1441,14 +1703,14 @@ export interface DeliveryStreamEncryptionConfigurationInput {
  * that allows the Firehose service to use the customer managed CMK to perform
  * encryption and decryption. Firehose manages that grant.
  *
  * When you invoke StartDeliveryStreamEncryption to change the CMK for a
- * delivery stream that is encrypted with a customer managed CMK, Firehose
+ * Firehose stream that is encrypted with a customer managed CMK, Firehose
  * schedules the grant it had on the old CMK for retirement.
- * You can use a CMK of type CUSTOMER_MANAGED_CMK to encrypt up to 500 delivery streams. If
+ * You can use a CMK of type CUSTOMER_MANAGED_CMK to encrypt up to 500 Firehose streams. If
  * a CreateDeliveryStream or StartDeliveryStreamEncryption
  * operation exceeds this limit, Firehose throws a
  * LimitExceededException.
- * To encrypt your delivery stream, use symmetric CMKs. Firehose doesn't
+ * To encrypt your Firehose stream, use symmetric CMKs. Firehose doesn't
  * support asymmetric CMKs. For information about symmetric and asymmetric CMKs, see About
  * Symmetric and Asymmetric CMKs in the Amazon Web Services Key Management
  * Service developer guide.
@@ -1463,6 +1725,7 @@ export interface DeliveryStreamEncryptionConfigurationInput {
  * @enum
  */
 export const DeliveryStreamType = {
+  DatabaseAsSource: "DatabaseAsSource",
   DirectPut: "DirectPut",
   KinesisStreamAsSource: "KinesisStreamAsSource",
   MSKAsSource: "MSKAsSource",
@@ -1490,7 +1753,7 @@ export interface ElasticsearchBufferingHints {

  * Buffer incoming data to the specified size, in MBs, before delivering it to the
  * destination. The default value is 5.
  * We recommend setting this parameter to a value greater than the amount of data you
- * typically ingest into the delivery stream in 10 seconds. For example, if you typically
+ * typically ingest into the Firehose stream in 10 seconds. For example, if you typically
  * ingest data at 1 MB/sec, the value should be 10 MB or higher.
@@ -1624,7 +1887,7 @@ export interface ElasticsearchDestinationConfiguration {
  * appended to the prefix. For more information, see Amazon S3 Backup for the
  * Amazon ES Destination. Default value is
  * FailedDocumentsOnly.
- * You can't change this backup mode after you create the delivery stream.
+ * You can't change this backup mode after you create the Firehose stream.
  * @public
  */
 S3BackupMode?: ElasticsearchS3BackupMode;
@@ -1642,7 +1905,7 @@ export interface ElasticsearchDestinationConfiguration {
 ProcessingConfiguration?: ProcessingConfiguration;

 /**
- * The Amazon CloudWatch logging options for your delivery stream.
+ * The Amazon CloudWatch logging options for your Firehose stream.
  * @public
  */
 CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
@@ -2100,14 +2363,13 @@ export interface DataFormatConversionConfiguration {
 }

 /**

- * The retry behavior in case Firehose is unable to deliver data to an Amazon
- * S3 prefix.
+ * The retry behavior in case Firehose is unable to deliver data to a destination.
  * @public
  */
 export interface RetryOptions {
   /**
    * The period of time during which Firehose retries to deliver data to the
-   * specified Amazon S3 prefix.
+   * specified destination.
    * @public
    */
   DurationInSeconds?: number;
@@ -2130,7 +2392,7 @@ export interface DynamicPartitioningConfiguration {
   /**
    * Specifies that the dynamic partitioning is enabled for this Firehose
-   * delivery stream.
+   * stream.
    * @public
    */
   Enabled?: boolean;
@@ -2208,7 +2470,7 @@ export interface ExtendedS3DestinationConfiguration {
   EncryptionConfiguration?: EncryptionConfiguration;

   /**
-   * The Amazon CloudWatch logging options for your delivery stream.
+   * The Amazon CloudWatch logging options for your Firehose stream.
    * @public
    */
   CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
@@ -2220,9 +2482,9 @@ export interface ExtendedS3DestinationConfiguration {
   ProcessingConfiguration?: ProcessingConfiguration;

   /**

-   * The Amazon S3 backup mode. After you create a delivery stream, you can update it to
+   * The Amazon S3 backup mode. After you create a Firehose stream, you can update it to
    * enable Amazon S3 backup if it is disabled. If backup is enabled, you can't update the
-   * delivery stream to disable it.
+   * Firehose stream to disable it.
    * @public
    */
   S3BackupMode?: S3BackupMode;
@@ -2275,7 +2537,7 @@ export interface HttpEndpointBufferingHints {
    * Buffer incoming data to the specified size, in MBs, before delivering it to the
    * destination. The default value is 5.
    * We recommend setting this parameter to a value greater than the amount of data you
-   * typically ingest into the delivery stream in 10 seconds. For example, if you typically
+   * typically ingest into the Firehose stream in 10 seconds. For example, if you typically
    * ingest data at 1 MB/sec, the value should be 10 MB or higher.
    * @public
    */
@@ -2390,61 +2652,31 @@ export const HttpEndpointS3BackupMode = {
 export type HttpEndpointS3BackupMode = (typeof HttpEndpointS3BackupMode)[keyof typeof HttpEndpointS3BackupMode];

 /**

- * The structure that defines how Firehose accesses the secret.
+ * Describes the configuration of the HTTP endpoint destination.
  * @public
  */
-export interface SecretsManagerConfiguration {
+export interface HttpEndpointDestinationConfiguration {
   /**
-   * The ARN of the secret that stores your credentials. It must be in the same region as the
-   * Firehose stream and the role. The secret ARN can reside in a different account than the
-   * delivery stream and role as Firehose supports cross-account secret access. This parameter
-   * is required when Enabled is set to True.
+   * The configuration of the HTTP endpoint selected as the destination.
    * @public
    */
-  SecretARN?: string;
+  EndpointConfiguration: HttpEndpointConfiguration | undefined;

   /**
-   * Specifies the role that Firehose assumes when calling the Secrets Manager API operation.
-   * When you provide the role, it overrides any destination specific role defined in the
-   * destination configuration. If you do not provide the then we use the destination
-   * specific role. This parameter is required for Splunk.
+   * The buffering options that can be used before data is delivered to the specified
+   * destination. Firehose treats these options as hints, and it might choose to
+   * use more optimal values. The SizeInMBs and IntervalInSeconds
+   * parameters are optional. However, if you specify a value for one of them, you must also
+   * provide a value for the other.
    * @public
    */
-  RoleARN?: string;
+  BufferingHints?: HttpEndpointBufferingHints;

   /**
-   * Specifies whether you want to use the the secrets manager feature. When set as
-   * True the secrets manager configuration overwrites the existing secrets in
-   * the destination configuration. When it's set to False Firehose falls back to
-   * the credentials in the destination configuration.
+   * Describes the Amazon CloudWatch logging options for your Firehose stream.
    * @public
    */
-  Enabled: boolean | undefined;
-}
-
-/**
- * Describes the configuration of the HTTP endpoint destination.
- * @public
- */
-export interface HttpEndpointDestinationConfiguration {
-  /**
-   * The configuration of the HTTP endpoint selected as the destination.
-   * @public
-   */
-  EndpointConfiguration: HttpEndpointConfiguration | undefined;
-
-  /**
-   * The buffering options that can be used before data is delivered to the specified
-   * destination. Firehose treats these options as hints, and it might choose to
-   * use more optimal values. The SizeInMBs and IntervalInSeconds
-   * parameters are optional. However, if you specify a value for one of them, you must also
-   * provide a value for the other.
-   * @public
-   */
-  BufferingHints?: HttpEndpointBufferingHints;
-
-  /**
-   * Describes the Amazon CloudWatch logging options for your delivery stream.
-   * @public
-   */
-  CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
+  CloudWatchLoggingOptions?: CloudWatchLoggingOptions;

   /**
    * The configuration of the request sent to the HTTP endpoint that is specified as the
@@ -2500,17 +2732,47 @@ export interface HttpEndpointDestinationConfiguration {
   /**

-   * Describes the configuration of a destination in Apache Iceberg Tables.
    *
    * Amazon Data Firehose is in preview release and is subject to change.
    * @public
    */
+export interface PartitionField {
+  /**
+   * Amazon Data Firehose is in preview release and is subject to change.
+   * @public
+   */
+  SourceName: string | undefined;
+}
+
+/**
+ * Amazon Data Firehose is in preview release and is subject to change.
+ * @public
+ */
+export interface PartitionSpec {
+  /**
+   * Amazon Data Firehose is in preview release and is subject to change.
+   * @public
+   */
+  Identity?: PartitionField[];
+}
+
+/**
+ * Describes the configuration of a destination in Apache Iceberg Tables.
+ * @public
+ */
 export interface DestinationTableConfiguration {
   /**
    * Specifies the name of the Apache Iceberg Table.
-   * Amazon Data Firehose is in preview release and is subject to change.
    * @public
    */
   DestinationTableName: string | undefined;
@@ -2519,26 +2781,31 @@ export interface DestinationTableConfiguration {
   /**
    * The name of the Apache Iceberg database.
-   * Amazon Data Firehose is in preview release and is subject to change.
    * @public
    */
   DestinationDatabaseName: string | undefined;

   /**
-   * A list of unique keys for a given Apache Iceberg table. Firehose will use these for running Create/Update/Delete operations on the given Iceberg table.
-   * Amazon Data Firehose is in preview release and is subject to change.
+   * A list of unique keys for a given Apache Iceberg table. Firehose will use these for running Create, Update, or Delete operations on the given Iceberg table.
    * @public
    */
   UniqueKeys?: string[];

+  /**
+   * Amazon Data Firehose is in preview release and is subject to change.
+   * @public
+   */
+  PartitionSpec?: PartitionSpec;
+
   /**
    * The table specific S3 error output prefix. All the errors that occurred while delivering to this table will be prefixed with this value in S3 destination.
-   * Amazon Data Firehose is in preview release and is subject to change.
    * @public
    */
   S3ErrorOutputPrefix?: string;
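// A sketch of a per-table routing entry using the new PartitionSpec field; the
// database, table, and column names are made up for illustration.
import type { DestinationTableConfiguration } from "@aws-sdk/client-firehose";

const tableConfig: DestinationTableConfiguration = {
  DestinationDatabaseName: "analytics",
  DestinationTableName: "events",
  UniqueKeys: ["event_id"], // drives Create, Update, or Delete routing
  PartitionSpec: {
    // Identity transform: each listed source column becomes a partition field.
    Identity: [{ SourceName: "event_date" }],
  },
  S3ErrorOutputPrefix: "iceberg-errors/events/",
};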

@@ -2560,20 +2827,66 @@ export type IcebergS3BackupMode = (typeof IcebergS3BackupMode)[keyof typeof IcebergS3BackupMode];

 /**
-   * Specifies the destination configure settings for Apache Iceberg Table.
  *
  * Amazon Data Firehose is in preview release and is subject to change.
  * @public
  */
+export interface SchemaEvolutionConfiguration {
+  /**
+   * Amazon Data Firehose is in preview release and is subject to change.
+   * @public
+   */
+  Enabled: boolean | undefined;
+}
+
+/**
+ * Amazon Data Firehose is in preview release and is subject to change.
+ * @public
+ */
+export interface TableCreationConfiguration {
+  /**
+   * Amazon Data Firehose is in preview release and is subject to change.
+   * @public
+   */
+  Enabled: boolean | undefined;
+}
+
+/**
+ * Specifies the destination configure settings for Apache Iceberg Table.
+ * @public
+ */
 export interface IcebergDestinationConfiguration {
   /**
    * Provides a list of DestinationTableConfigurations which Firehose uses
-   * to deliver data to Apache Iceberg tables.
-   * Amazon Data Firehose is in preview release and is subject to change.
+   * to deliver data to Apache Iceberg Tables. Firehose will write data with insert if table specific configuration is not provided here.
    * @public
    */
   DestinationTableConfigurationList?: DestinationTableConfiguration[];

+  /**
+   * Amazon Data Firehose is in preview release and is subject to change.
+   * @public
+   */
+  SchemaEvolutionConfiguration?: SchemaEvolutionConfiguration;
+
+  /**
+   * Amazon Data Firehose is in preview release and is subject to change.
+   * @public
+   */
+  TableCreationConfiguration?: TableCreationConfiguration;
+
   /**
    * Describes hints for the buffering to perform before delivering data to the
    * destination. These options are treated as hints, and therefore Firehose might
@@ -2585,7 +2898,7 @@ export interface IcebergDestinationConfiguration {
   BufferingHints?: BufferingHints;

   /**
-   * Describes the Amazon CloudWatch logging options for your delivery stream.
+   * Describes the Amazon CloudWatch logging options for your Firehose stream.
    * @public
    */
   CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
@@ -2597,25 +2910,22 @@ export interface IcebergDestinationConfiguration {
   ProcessingConfiguration?: ProcessingConfiguration;

   /**
-   * Describes how Firehose will backup records. Currently,Firehose only supports
-   * FailedDataOnly for preview.
-   * Amazon Data Firehose is in preview release and is subject to change.
+   * Describes how Firehose will back up records. Currently, S3 backup only supports
+   * FailedDataOnly.
    * @public
    */
   S3BackupMode?: IcebergS3BackupMode;

   /**
-   * The retry behavior in case Firehose is unable to deliver data to an Amazon
-   * S3 prefix.
+   * The retry behavior in case Firehose is unable to deliver data to a destination.
    * @public
    */
   RetryOptions?: RetryOptions;

   /**
-   * The Amazon Resource Name (ARN) of the Apache Iceberg tables role.
-   * Amazon Data Firehose is in preview release and is subject to change.
+   * The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling Apache Iceberg Tables.
    * @public
    */
   RoleARN: string | undefined;
@@ -2624,7 +2934,6 @@ export interface IcebergDestinationConfiguration {
   /**
    * Configuration describing where the destination Apache Iceberg Tables are persisted.
-   * Amazon Data Firehose is in preview release and is subject to change.
    * @public
    */
   CatalogConfiguration: CatalogConfiguration | undefined;
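// A sketch of the destination settings above with the new schema-evolution and
// table-creation toggles enabled; ARNs and bucket names are placeholders.
import type { IcebergDestinationConfiguration } from "@aws-sdk/client-firehose";

const icebergDestination: IcebergDestinationConfiguration = {
  RoleARN: "arn:aws:iam::111122223333:role/FirehoseIcebergRole",
  CatalogConfiguration: { CatalogARN: "arn:aws:glue:us-east-1:111122223333:catalog" },
  // Required S3 configuration; failed records land here under the error prefix.
  S3Configuration: {
    RoleARN: "arn:aws:iam::111122223333:role/FirehoseIcebergRole",
    BucketARN: "arn:aws:s3:::my-firehose-error-bucket",
  },
  // Without a matching entry here, Firehose writes records with plain inserts.
  DestinationTableConfigurationList: [
    { DestinationDatabaseName: "analytics", DestinationTableName: "events" },
  ],
  SchemaEvolutionConfiguration: { Enabled: true }, // follow source column changes
  TableCreationConfiguration: { Enabled: true },   // create missing tables
  S3BackupMode: "FailedDataOnly",
};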

@@ -2638,7 +2947,7 @@ export interface IcebergDestinationConfiguration {

 /**
  * The stream and role Amazon Resource Names (ARNs) for a Kinesis data stream used as
- * the source for a delivery stream.
+ * the source for a Firehose stream.
  * @public
  */
 export interface KinesisStreamSourceConfiguration {
@@ -2786,9 +3095,9 @@ export interface RedshiftDestinationConfiguration {
   ProcessingConfiguration?: ProcessingConfiguration;

   /**
-   * The Amazon S3 backup mode. After you create a delivery stream, you can update it to
+   * The Amazon S3 backup mode. After you create a Firehose stream, you can update it to
    * enable Amazon S3 backup if it is disabled. If backup is enabled, you can't update the
-   * delivery stream to disable it.
+   * Firehose stream to disable it.
    * @public
    */
   S3BackupMode?: RedshiftS3BackupMode;
@@ -2800,7 +3109,7 @@ export interface RedshiftDestinationConfiguration {
   S3BackupConfiguration?: S3DestinationConfiguration;

   /**
-   * The CloudWatch logging options for your delivery stream.
+   * The CloudWatch logging options for your Firehose stream.
    * @public
    */
   CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
@@ -2822,9 +3131,8 @@ export interface RedshiftDestinationConfiguration {
  */
 export interface SnowflakeBufferingHints {
   /**

-   * Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 1.
+   * Buffer incoming data to the specified size, in MBs, before delivering it to the
+   * destination. The default value is 128.
    * @public
    */
   SizeInMBs?: number;
@@ -3000,7 +3308,7 @@ export interface SnowflakeDestinationConfiguration {
   SnowflakeVpcConfiguration?: SnowflakeVpcConfiguration;

   /**
-   * Describes the Amazon CloudWatch logging options for your delivery stream.
+   * Describes the Amazon CloudWatch logging options for your Firehose stream.
    * @public
    */
   CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
@@ -3183,7 +3491,7 @@ export interface SplunkDestinationConfiguration {
   ProcessingConfiguration?: ProcessingConfiguration;

   /**
-   * The Amazon CloudWatch logging options for your delivery stream.
+   * The Amazon CloudWatch logging options for your Firehose stream.
    * @public
    */
   CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
@@ -3204,7 +3512,7 @@ export interface SplunkDestinationConfiguration {
 }

 /**
- * Metadata that you can assign to a delivery stream, consisting of a key-value
+ * Metadata that you can assign to a Firehose stream, consisting of a key-value
  * pair.
  * @public
  */
@@ -3230,26 +3538,26 @@ export interface Tag {
  */
 export interface CreateDeliveryStreamInput {
   /**

-   * The name of the delivery stream. This name must be unique per Amazon Web Services
-   * account in the same Amazon Web Services Region. If the delivery streams are in different
-   * accounts or different Regions, you can have multiple delivery streams with the same
+   * The name of the Firehose stream. This name must be unique per Amazon Web Services
+   * account in the same Amazon Web Services Region. If the Firehose streams are in different
+   * accounts or different Regions, you can have multiple Firehose streams with the same
    * name.
    * @public
    */
   DeliveryStreamName: string | undefined;

   /**
-   * The delivery stream type. This parameter can be one of the following
+   * The Firehose stream type. This parameter can be one of the following
    * values:
-   *   • DirectPut: Provider applications access the delivery stream
+   *   • DirectPut: Provider applications access the Firehose stream
    *     directly.
-   *   • KinesisStreamAsSource: The delivery stream uses a Kinesis data
+   *   • KinesisStreamAsSource: The Firehose stream uses a Kinesis data
    *     stream as a source.
@@ -3258,7 +3566,7 @@ export interface CreateDeliveryStreamInput {
   DeliveryStreamType?: DeliveryStreamType;

   /**
-   * When a Kinesis data stream is used as the source for the delivery stream, a KinesisStreamSourceConfiguration containing the Kinesis data stream Amazon
+   * When a Kinesis data stream is used as the source for the Firehose stream, a KinesisStreamSourceConfiguration containing the Kinesis data stream Amazon
    * Resource Name (ARN) and the role ARN for the source stream.
    * @public
    */
@@ -3303,170 +3611,415 @@ export interface CreateDeliveryStreamInput {
    * destination.
    * @public
    */
-  AmazonopensearchserviceDestinationConfiguration?: AmazonopensearchserviceDestinationConfiguration;
+  AmazonopensearchserviceDestinationConfiguration?: AmazonopensearchserviceDestinationConfiguration;
+
+  /**
+   * The destination in Splunk. You can specify only one destination.
+   * @public
+   */
+  SplunkDestinationConfiguration?: SplunkDestinationConfiguration;
+
+  /**
+   * Enables configuring Kinesis Firehose to deliver data to any HTTP endpoint destination.
+   * You can specify only one destination.
+   * @public
+   */
+  HttpEndpointDestinationConfiguration?: HttpEndpointDestinationConfiguration;
+
+  /**
+   * A set of tags to assign to the Firehose stream. A tag is a key-value pair that you can
+   * define and assign to Amazon Web Services resources. Tags are metadata. For example, you can
+   * add friendly names and descriptions or other types of information that can help you
+   * distinguish the Firehose stream. For more information about tags, see Using
+   * Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User
+   * Guide.
+   * You can specify up to 50 tags when creating a Firehose stream.
+   * If you specify tags in the CreateDeliveryStream action, Amazon Data
+   * Firehose performs an additional authorization on the
+   * firehose:TagDeliveryStream action to verify if users have permissions to
+   * create tags. If you do not provide this permission, requests to create new
+   * Firehose streams with IAM resource tags will fail with an
+   * AccessDeniedException such as the following.
+   * AccessDeniedException
+   * User: arn:aws:sts::x:assumed-role/x/x is not authorized to perform: firehose:TagDeliveryStream on resource: arn:aws:firehose:us-east-1:x:deliverystream/x with an explicit deny in an identity-based policy.
+   * For an example IAM policy, see Tag example.
+   * @public
+   */
+  Tags?: Tag[];
+
+  /**
+   * The destination in the Serverless offering for Amazon OpenSearch Service. You can
+   * specify only one destination.
+   * @public
+   */
+  AmazonOpenSearchServerlessDestinationConfiguration?: AmazonOpenSearchServerlessDestinationConfiguration;
+
+  /**
+   * The configuration for the Amazon MSK cluster to be used as the source for a delivery
+   * stream.
+   * @public
+   */
+  MSKSourceConfiguration?: MSKSourceConfiguration;
+
+  /**
+   * Configure Snowflake destination
+   * @public
+   */
+  SnowflakeDestinationConfiguration?: SnowflakeDestinationConfiguration;
+
+  /**
+   * Configure Apache Iceberg Tables destination.
+   * @public
+   */
+  IcebergDestinationConfiguration?: IcebergDestinationConfiguration;
+
+  /**
+   * Amazon Data Firehose is in preview release and is subject to change.
+   * @public
+   */
+  DatabaseSourceConfiguration?: DatabaseSourceConfiguration;
+}
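// End-to-end sketch: creating a DatabaseAsSource stream that delivers into
// Apache Iceberg Tables, reusing the databaseSource and icebergDestination
// shapes sketched earlier. Region and stream name are placeholders; assumes an
// ES module context for top-level await.
import { FirehoseClient, CreateDeliveryStreamCommand } from "@aws-sdk/client-firehose";

const client = new FirehoseClient({ region: "us-east-1" });
const { DeliveryStreamARN } = await client.send(
  new CreateDeliveryStreamCommand({
    DeliveryStreamName: "orders-cdc-to-iceberg",
    DeliveryStreamType: "DatabaseAsSource", // the newly added enum member
    DatabaseSourceConfiguration: databaseSource,
    IcebergDestinationConfiguration: icebergDestination,
  })
);
console.log("created:", DeliveryStreamARN);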

+
+/**
+ * @public
+ */
+export interface CreateDeliveryStreamOutput {
+  /**
+   * The ARN of the Firehose stream.
+   * @public
+   */
+  DeliveryStreamARN?: string;
+}
+
+/**
+ * The specified input parameter has a value that is not valid.
+ * @public
+ */
+export class InvalidArgumentException extends __BaseException {
+  readonly name: "InvalidArgumentException" = "InvalidArgumentException";
+  readonly $fault: "client" = "client";
+  /**
+   * @internal
+   */
+  constructor(opts: __ExceptionOptionType<InvalidArgumentException, __BaseException>) {
+    super({
+      name: "InvalidArgumentException",
+      $fault: "client",
+      ...opts,
+    });
+    Object.setPrototypeOf(this, InvalidArgumentException.prototype);
+  }
+}
+
+/**
+ * Firehose throws this exception when an attempt to put records or to start
+ * or stop Firehose stream encryption fails. This happens when the KMS service throws one of
+ * the following exception types: AccessDeniedException,
+ * InvalidStateException, DisabledException, or
+ * NotFoundException.
+ * @public
+ */
+export class InvalidKMSResourceException extends __BaseException {
+  readonly name: "InvalidKMSResourceException" = "InvalidKMSResourceException";
+  readonly $fault: "client" = "client";
+  code?: string;
+  /**
+   * @internal
+   */
+  constructor(opts: __ExceptionOptionType<InvalidKMSResourceException, __BaseException>) {
+    super({
+      name: "InvalidKMSResourceException",
+      $fault: "client",
+      ...opts,
+    });
+    Object.setPrototypeOf(this, InvalidKMSResourceException.prototype);
+    this.code = opts.code;
+  }
+}
+
+/**
+ * You have already reached the limit for a requested resource.
+ * @public
+ */
+export class LimitExceededException extends __BaseException {
+  readonly name: "LimitExceededException" = "LimitExceededException";
+  readonly $fault: "client" = "client";
+  /**
+   * @internal
+   */
+  constructor(opts: __ExceptionOptionType<LimitExceededException, __BaseException>) {
+    super({
+      name: "LimitExceededException",
+      $fault: "client",
+      ...opts,
+    });
+    Object.setPrototypeOf(this, LimitExceededException.prototype);
+  }
+}
+
+/**
+ * The resource is already in use and not available for this operation.
+ * @public
+ */
+export class ResourceInUseException extends __BaseException {
+  readonly name: "ResourceInUseException" = "ResourceInUseException";
+  readonly $fault: "client" = "client";
+  /**
+   * @internal
+   */
+  constructor(opts: __ExceptionOptionType<ResourceInUseException, __BaseException>) {
+    super({
+      name: "ResourceInUseException",
+      $fault: "client",
+      ...opts,
+    });
+    Object.setPrototypeOf(this, ResourceInUseException.prototype);
+  }
+}
+
+/**
+ * @public
+ * @enum
+ */
+export const DeliveryStreamFailureType = {
+  CREATE_ENI_FAILED: "CREATE_ENI_FAILED",
+  CREATE_KMS_GRANT_FAILED: "CREATE_KMS_GRANT_FAILED",
+  DELETE_ENI_FAILED: "DELETE_ENI_FAILED",
+  DISABLED_KMS_KEY: "DISABLED_KMS_KEY",
+  ENI_ACCESS_DENIED: "ENI_ACCESS_DENIED",
+  INVALID_KMS_KEY: "INVALID_KMS_KEY",
+  KMS_ACCESS_DENIED: "KMS_ACCESS_DENIED",
+  KMS_KEY_NOT_FOUND: "KMS_KEY_NOT_FOUND",
+  KMS_OPT_IN_REQUIRED: "KMS_OPT_IN_REQUIRED",
+  RETIRE_KMS_GRANT_FAILED: "RETIRE_KMS_GRANT_FAILED",
+  SECURITY_GROUP_ACCESS_DENIED: "SECURITY_GROUP_ACCESS_DENIED",
+  SECURITY_GROUP_NOT_FOUND: "SECURITY_GROUP_NOT_FOUND",
+  SUBNET_ACCESS_DENIED: "SUBNET_ACCESS_DENIED",
+  SUBNET_NOT_FOUND: "SUBNET_NOT_FOUND",
+  UNKNOWN_ERROR: "UNKNOWN_ERROR",
+  VPC_ENDPOINT_SERVICE_NAME_NOT_FOUND: "VPC_ENDPOINT_SERVICE_NAME_NOT_FOUND",
+  VPC_INTERFACE_ENDPOINT_SERVICE_ACCESS_DENIED: "VPC_INTERFACE_ENDPOINT_SERVICE_ACCESS_DENIED",
+} as const;
+
+/**
+ * @public
+ */
+export type DeliveryStreamFailureType = (typeof DeliveryStreamFailureType)[keyof typeof DeliveryStreamFailureType];
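// Error-handling sketch for the modeled client faults above; each class
// extends __BaseException, so instanceof narrowing works after send(). Assumes
// an ES module context for top-level await.
import {
  FirehoseClient,
  CreateDeliveryStreamCommand,
  LimitExceededException,
  ResourceInUseException,
  InvalidKMSResourceException,
} from "@aws-sdk/client-firehose";

const firehose = new FirehoseClient({});
try {
  await firehose.send(new CreateDeliveryStreamCommand({ DeliveryStreamName: "my-stream" }));
} catch (err) {
  if (err instanceof LimitExceededException) {
    // Over a quota, e.g. streams per Region or streams per customer managed CMK.
  } else if (err instanceof ResourceInUseException) {
    // A stream with this name already exists and is not available.
  } else if (err instanceof InvalidKMSResourceException) {
    console.error("KMS problem:", err.code, err.message);
  } else {
    throw err;
  }
}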

+
+/**
+ * Provides details in case one of the following operations fails due to an error related
+ * to KMS: CreateDeliveryStream, DeleteDeliveryStream,
+ * StartDeliveryStreamEncryption, StopDeliveryStreamEncryption.
+ * @public
+ */
+export interface FailureDescription {
+  /**
+   * The type of error that caused the failure.
+   * @public
+   */
+  Type: DeliveryStreamFailureType | undefined;
+
+  /**
+   * A message providing details about the error that caused the failure.
+   * @public
+   */
+  Details: string | undefined;
+}
+
+/**
+ * @public
+ * @enum
+ */
+export const SnapshotRequestedBy = {
+  FIREHOSE: "FIREHOSE",
+  USER: "USER",
+} as const;
+
+/**
+ * @public
+ */
+export type SnapshotRequestedBy = (typeof SnapshotRequestedBy)[keyof typeof SnapshotRequestedBy];
+
+/**
+ * @public
+ * @enum
+ */
+export const SnapshotStatus = {
+  COMPLETE: "COMPLETE",
+  IN_PROGRESS: "IN_PROGRESS",
+  SUSPENDED: "SUSPENDED",
+} as const;
+
+/**
+ * @public
+ */
+export type SnapshotStatus = (typeof SnapshotStatus)[keyof typeof SnapshotStatus];
+
+/**

+ * Amazon Data Firehose is in preview release and is subject to change.
+ * @public
+ */
+export interface DatabaseSnapshotInfo {
+  /**
+   * Amazon Data Firehose is in preview release and is subject to change.
+   * @public
+   */
+  Id: string | undefined;
+
+  /**
+   * Amazon Data Firehose is in preview release and is subject to change.
+   * @public
+   */
+  Table: string | undefined;
+
+  /**
+   * Amazon Data Firehose is in preview release and is subject to change.
+   * @public
+   */
+  RequestTimestamp: Date | undefined;
+
+  /**
+   * Amazon Data Firehose is in preview release and is subject to change.
+   * @public
+   */
+  RequestedBy: SnapshotRequestedBy | undefined;
+
+  /**
+   * Amazon Data Firehose is in preview release and is subject to change.
+   * @public
+   */
+  Status: SnapshotStatus | undefined;
+
+  /**
+   * Provides details in case one of the following operations fails due to an error related
+   * to KMS: CreateDeliveryStream, DeleteDeliveryStream,
+   * StartDeliveryStreamEncryption, StopDeliveryStreamEncryption.
+   * @public
+   */
+  FailureDescription?: FailureDescription;
+}
+
+/**
+ * Amazon Data Firehose is in preview release and is subject to change.
+ * @public
+ */
+export interface DatabaseSourceDescription {

   /**
-   * The destination in Splunk. You can specify only one destination.
+   * Amazon Data Firehose is in preview release and is subject to change.
    * @public
    */
-  SplunkDestinationConfiguration?: SplunkDestinationConfiguration;
+  Type?: DatabaseType;

   /**
-   * Enables configuring Kinesis Firehose to deliver data to any HTTP endpoint destination.
-   * You can specify only one destination.
+   * Amazon Data Firehose is in preview release and is subject to change.
    * @public
    */
-  HttpEndpointDestinationConfiguration?: HttpEndpointDestinationConfiguration;
+  Endpoint?: string;

   /**
-   * A set of tags to assign to the delivery stream. A tag is a key-value pair that you can
-   * define and assign to Amazon Web Services resources. Tags are metadata. For example, you can
-   * add friendly names and descriptions or other types of information that can help you
-   * distinguish the delivery stream. For more information about tags, see Using
-   * Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User
-   * Guide.
-   * You can specify up to 50 tags when creating a delivery stream.
-   * If you specify tags in the CreateDeliveryStream action, Amazon Data
-   * Firehose performs an additional authorization on the
-   * firehose:TagDeliveryStream action to verify if users have permissions to
-   * create tags. If you do not provide this permission, requests to create new Firehose
-   * delivery streams with IAM resource tags will fail with an
-   * AccessDeniedException such as following.
-   * AccessDeniedException
-   * User: arn:aws:sts::x:assumed-role/x/x is not authorized to perform: firehose:TagDeliveryStream on resource: arn:aws:firehose:us-east-1:x:deliverystream/x with an explicit deny in an identity-based policy.
-   * For an example IAM policy, see Tag example.
+   * Amazon Data Firehose is in preview release and is subject to change.
    * @public
    */
-  Tags?: Tag[];
+  Port?: number;

   /**
-   * The destination in the Serverless offering for Amazon OpenSearch Service. You can
-   * specify only one destination.
+   * Amazon Data Firehose is in preview release and is subject to change.
    * @public
    */
-  AmazonOpenSearchServerlessDestinationConfiguration?: AmazonOpenSearchServerlessDestinationConfiguration;
+  SSLMode?: SSLMode;

   /**
-   * The configuration for the Amazon MSK cluster to be used as the source for a delivery
-   * stream.
+   * Amazon Data Firehose is in preview release and is subject to change.
    * @public
    */
-  MSKSourceConfiguration?: MSKSourceConfiguration;
+  Databases?: DatabaseList;

   /**
-   * Configure Snowflake destination
+   * Amazon Data Firehose is in preview release and is subject to change.
    * @public
    */
-  SnowflakeDestinationConfiguration?: SnowflakeDestinationConfiguration;
+  Tables?: DatabaseTableList;

   /**
-   * Configure Apache Iceberg Tables destination.
+   * Amazon Data Firehose is in preview release and is subject to change.
    * @public
    */
-  IcebergDestinationConfiguration?: IcebergDestinationConfiguration;
-}
+  Columns?: DatabaseColumnList;

-/**
- * @public
- */
-export interface CreateDeliveryStreamOutput {
   /**
-   * The ARN of the delivery stream.
+   * Amazon Data Firehose is in preview release and is subject to change.
    * @public
    */
-  DeliveryStreamARN?: string;
-}
+  SurrogateKeys?: string[];

-/**
- * The specified input parameter has a value that is not valid.
- * @public
- */
-export class InvalidArgumentException extends __BaseException {
-  readonly name: "InvalidArgumentException" = "InvalidArgumentException";
-  readonly $fault: "client" = "client";
   /**
-   * @internal
+   * Amazon Data Firehose is in preview release and is subject to change.
+   * @public
    */
-  constructor(opts: __ExceptionOptionType<InvalidArgumentException, __BaseException>) {
-    super({
-      name: "InvalidArgumentException",
-      $fault: "client",
-      ...opts,
-    });
-    Object.setPrototypeOf(this, InvalidArgumentException.prototype);
-  }
-}
+  SnapshotWatermarkTable?: string;

-/**
- * Firehose throws this exception when an attempt to put records or to start
- * or stop delivery stream encryption fails. This happens when the KMS service throws one of
- * the following exception types: AccessDeniedException,
- * InvalidStateException, DisabledException, or
- * NotFoundException.
- * @public
- */
-export class InvalidKMSResourceException extends __BaseException {
-  readonly name: "InvalidKMSResourceException" = "InvalidKMSResourceException";
-  readonly $fault: "client" = "client";
-  code?: string;
   /**
-   * @internal
+   * Amazon Data Firehose is in preview release and is subject to change.
+   * @public
    */
-  constructor(opts: __ExceptionOptionType<InvalidKMSResourceException, __BaseException>) {
-    super({
-      name: "InvalidKMSResourceException",
-      $fault: "client",
-      ...opts,
-    });
-    Object.setPrototypeOf(this, InvalidKMSResourceException.prototype);
-    this.code = opts.code;
-  }
-}
+  SnapshotInfo?: DatabaseSnapshotInfo[];

-/**
- * You have already reached the limit for a requested resource.
- * @public
- */
-export class LimitExceededException extends __BaseException {
-  readonly name: "LimitExceededException" = "LimitExceededException";
-  readonly $fault: "client" = "client";
   /**
-   * @internal
+   * Amazon Data Firehose is in preview release and is subject to change.
+   * @public
    */
-  constructor(opts: __ExceptionOptionType<LimitExceededException, __BaseException>) {
-    super({
-      name: "LimitExceededException",
-      $fault: "client",
-      ...opts,
-    });
-    Object.setPrototypeOf(this, LimitExceededException.prototype);
-  }
-}
+  DatabaseSourceAuthenticationConfiguration?: DatabaseSourceAuthenticationConfiguration;

-/**
- * The resource is already in use and not available for this operation.
- * @public
- */
-export class ResourceInUseException extends __BaseException {
-  readonly name: "ResourceInUseException" = "ResourceInUseException";
-  readonly $fault: "client" = "client";
   /**
-   * @internal
+   * Amazon Data Firehose is in preview release and is subject to change.
+   * @public
    */
-  constructor(opts: __ExceptionOptionType<ResourceInUseException, __BaseException>) {
-    super({
-      name: "ResourceInUseException",
-      $fault: "client",
-      ...opts,
-    });
-    Object.setPrototypeOf(this, ResourceInUseException.prototype);
-  }
+  DatabaseSourceVPCConfiguration?: DatabaseSourceVPCConfiguration;
 }

 /**
@@ -3474,13 +4027,13 @@ export class ResourceInUseException extends __BaseException {
  */
 export interface DeleteDeliveryStreamInput {
   /**

-   * The name of the delivery stream.
+   * The name of the Firehose stream.
    * @public
    */
   DeliveryStreamName: string | undefined;

   /**
-   * Set this to true if you want to delete the delivery stream even if Firehose
+   * Set this to true if you want to delete the Firehose stream even if Firehose
    * is unable to retire the grant for the CMK. Firehose might be unable to retire
    * the grant due to a customer error, such as when the CMK or the grant are in an invalid
    * state. If you force deletion, you can then use the RevokeGrant operation to
@@ -3518,53 +4071,6 @@ export class ResourceNotFoundException extends __BaseException {
   }
 }

-/**
- * @public
- * @enum
- */
-export const DeliveryStreamFailureType = {
-  CREATE_ENI_FAILED: "CREATE_ENI_FAILED",
-  CREATE_KMS_GRANT_FAILED: "CREATE_KMS_GRANT_FAILED",
-  DELETE_ENI_FAILED: "DELETE_ENI_FAILED",
-  DISABLED_KMS_KEY: "DISABLED_KMS_KEY",
-  ENI_ACCESS_DENIED: "ENI_ACCESS_DENIED",
-  INVALID_KMS_KEY: "INVALID_KMS_KEY",
-  KMS_ACCESS_DENIED: "KMS_ACCESS_DENIED",
-  KMS_KEY_NOT_FOUND: "KMS_KEY_NOT_FOUND",
-  KMS_OPT_IN_REQUIRED: "KMS_OPT_IN_REQUIRED",
-  RETIRE_KMS_GRANT_FAILED: "RETIRE_KMS_GRANT_FAILED",
-  SECURITY_GROUP_ACCESS_DENIED: "SECURITY_GROUP_ACCESS_DENIED",
-  SECURITY_GROUP_NOT_FOUND: "SECURITY_GROUP_NOT_FOUND",
-  SUBNET_ACCESS_DENIED: "SUBNET_ACCESS_DENIED",
-  SUBNET_NOT_FOUND: "SUBNET_NOT_FOUND",
-  UNKNOWN_ERROR: "UNKNOWN_ERROR",
-} as const;
-
-/**
- * @public
- */
-export type DeliveryStreamFailureType = (typeof DeliveryStreamFailureType)[keyof typeof DeliveryStreamFailureType];
-
-/**
- * Provides details in case one of the following operations fails due to an error related
- * to KMS: CreateDeliveryStream, DeleteDeliveryStream,
- * StartDeliveryStreamEncryption, StopDeliveryStreamEncryption.
- * @public
- */
-export interface FailureDescription {
-  /**
-   * The type of error that caused the failure.
-   * @public
-   */
-  Type: DeliveryStreamFailureType | undefined;
-
-  /**
-   * A message providing details about the error that caused the failure.
-   * @public
-   */
-  Details: string | undefined;
-}
-
 /**
  * @public
  * @enum
  */
@@ -3608,7 +4114,7 @@ export interface DeliveryStreamEncryptionConfiguration {
   KeyType?: KeyType;

   /**

-   * This is the server-side encryption (SSE) status for the delivery stream. For a full
+   * This is the server-side encryption (SSE) status for the Firehose stream. For a full
    * description of the different values of this status, see StartDeliveryStreamEncryption and StopDeliveryStreamEncryption. If this status is ENABLING_FAILED
    * or DISABLING_FAILED, it is the status of the most recent attempt to enable or
    * disable SSE, respectively.
    * @public
    */
@@ -3800,7 +4306,7 @@ export interface ExtendedS3DestinationDescription {
   EncryptionConfiguration: EncryptionConfiguration | undefined;

   /**
-   * The Amazon CloudWatch logging options for your delivery stream.
+   * The Amazon CloudWatch logging options for your Firehose stream.
    * @public
    */
   CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
@@ -3892,7 +4398,7 @@ export interface HttpEndpointDestinationDescription {
   BufferingHints?: HttpEndpointBufferingHints;

   /**
-   * Describes the Amazon CloudWatch logging options for your delivery stream.
+   * Describes the Amazon CloudWatch logging options for your Firehose stream.
    * @public
    */
   CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
@@ -3953,18 +4459,32 @@ export interface HttpEndpointDestinationDescription {
 /**

  * Describes a destination in Apache Iceberg Tables.
- * Amazon Data Firehose is in preview release and is subject to change.
  * @public
  */
 export interface IcebergDestinationDescription {
   /**
    * Provides a list of DestinationTableConfigurations which Firehose uses
-   * to deliver data to Apache Iceberg tables.
-   * Amazon Data Firehose is in preview release and is subject to change.
+   * to deliver data to Apache Iceberg Tables. Firehose will write data with insert if table specific configuration is not provided here.
    * @public
    */
   DestinationTableConfigurationList?: DestinationTableConfiguration[];

+  /**
+   * Amazon Data Firehose is in preview release and is subject to change.
+   * @public
+   */
+  SchemaEvolutionConfiguration?: SchemaEvolutionConfiguration;
+
+  /**
+   * Amazon Data Firehose is in preview release and is subject to change.
+   * @public
+   */
+  TableCreationConfiguration?: TableCreationConfiguration;
+
   /**
    * Describes hints for the buffering to perform before delivering data to the
    * destination. These options are treated as hints, and therefore Firehose might
@@ -3976,7 +4496,7 @@ export interface IcebergDestinationDescription {
   BufferingHints?: BufferingHints;

   /**
-   * Describes the Amazon CloudWatch logging options for your delivery stream.
+   * Describes the Amazon CloudWatch logging options for your Firehose stream.
    * @public
    */
   CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
@@ -3989,24 +4509,21 @@ export interface IcebergDestinationDescription {
   /**
    * Describes how Firehose will back up records. Currently, Firehose only supports
-   * FailedDataOnly for preview.
-   * Amazon Data Firehose is in preview release and is subject to change.
+   * FailedDataOnly.
    * @public
    */
   S3BackupMode?: IcebergS3BackupMode;

   /**
-   * The retry behavior in case Firehose is unable to deliver data to an Amazon
-   * S3 prefix.
+   * The retry behavior in case Firehose is unable to deliver data to a destination.
    * @public
    */
   RetryOptions?: RetryOptions;

   /**
-   * The Amazon Resource Name (ARN) of the Apache Iceberg Tables role.
-   * Amazon Data Firehose is in preview release and is subject to change.
+   * The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling Apache Iceberg Tables.
    * @public
    */
   RoleARN?: string;
@@ -4015,7 +4532,6 @@ export interface IcebergDestinationDescription {
   /**
    * Configuration describing where the destination Iceberg tables are persisted.
-   * Amazon Data Firehose is in preview release and is subject to change.
    * @public
    */
   CatalogConfiguration?: CatalogConfiguration;
@@ -4090,7 +4606,7 @@ export interface RedshiftDestinationDescription {
   S3BackupDescription?: S3DestinationDescription;

   /**

-   * The Amazon CloudWatch logging options for your delivery stream.
+   * The Amazon CloudWatch logging options for your Firehose stream.
    * @public
    */
   CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
@@ -4173,7 +4689,7 @@ export interface SnowflakeDestinationDescription {
   SnowflakeVpcConfiguration?: SnowflakeVpcConfiguration;

   /**
-   * Describes the Amazon CloudWatch logging options for your delivery stream.
+   * Describes the Amazon CloudWatch logging options for your Firehose stream.
    * @public
    */
   CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
@@ -4289,7 +4805,7 @@ export interface SplunkDestinationDescription {
   ProcessingConfiguration?: ProcessingConfiguration;

   /**
-   * The Amazon CloudWatch logging options for your delivery stream.
+   * The Amazon CloudWatch logging options for your Firehose stream.
    * @public
    */
   CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
@@ -4310,7 +4826,7 @@ export interface SplunkDestinationDescription {
 }

 /**
- * Describes the destination for a delivery stream.
+ * Describes the destination for a Firehose stream.
  * @public
  */
 export interface DestinationDescription {
@@ -4378,7 +4894,6 @@ export interface DestinationDescription {
   /**

   * Describes a destination in Apache Iceberg Tables.
-   * Amazon Data Firehose is in preview release and is subject to change.
    * @public
    */
   IcebergDestinationDescription?: IcebergDestinationDescription;
 }

 /**
  * Details about a Kinesis data stream used as the source for a Firehose
- * delivery stream.
+ * stream.
  * @public
  */
 export interface KinesisStreamSourceDescription {
@@ -4416,7 +4931,7 @@ export interface KinesisStreamSourceDescription {
 /**
  * Details about the Amazon MSK cluster used as the source for a Firehose
- * delivery stream.
+ * stream.
  * @public
  */
 export interface MSKSourceDescription {
@@ -4458,7 +4973,7 @@ export interface MSKSourceDescription {
 /**
  * Details about a Kinesis data stream used as the source for a Firehose
- * delivery stream.
+ * stream.
  * @public
  */
 export interface SourceDescription {
@@ -4475,21 +4990,29 @@ export interface SourceDescription {
    * @public
    */
   MSKSourceDescription?: MSKSourceDescription;
+
+  /**
+   * Amazon Data Firehose is in preview release and is subject to change.
+   * @public
+   */
+  DatabaseSourceDescription?: DatabaseSourceDescription;
 }

 /**
- * Contains information about a delivery stream.
+ * Contains information about a Firehose stream.
  * @public
  */
 export interface DeliveryStreamDescription {
   /**
-   * The name of the delivery stream.
+   * The name of the Firehose stream.
    * @public
    */
   DeliveryStreamName: string | undefined;

   /**
-   * The Amazon Resource Name (ARN) of the delivery stream. For more information, see
+   * The Amazon Resource Name (ARN) of the Firehose stream. For more information, see
    * Amazon
    * Resource Names (ARNs) and Amazon Web Services Service Namespaces.
    * @public
@@ -4497,7 +5020,7 @@ export interface DeliveryStreamDescription {
   DeliveryStreamARN: string | undefined;

   /**
-   * The status of the delivery stream. If the status of a delivery stream is
+   * The status of the Firehose stream. If the status of a Firehose stream is
    * CREATING_FAILED, this status doesn't change, and you can't invoke
    * CreateDeliveryStream again on it. However, you can invoke the DeleteDeliveryStream operation to delete it.
    * @public
@@ -4513,22 +5036,22 @@ export interface DeliveryStreamDescription {
   FailureDescription?: FailureDescription;

   /**
-   * Indicates the server-side encryption (SSE) status for the delivery stream.
+   * Indicates the server-side encryption (SSE) status for the Firehose stream.
    * @public
    */
   DeliveryStreamEncryptionConfiguration?: DeliveryStreamEncryptionConfiguration;

   /**
-   * The delivery stream type. This can be one of the following values:
-   *   • DirectPut: Provider applications access the delivery stream
+   * The Firehose stream type. This can be one of the following values:
+   *   • DirectPut: Provider applications access the Firehose stream
    *     directly.
-   *   • KinesisStreamAsSource: The delivery stream uses a Kinesis data
+   *   • KinesisStreamAsSource: The Firehose stream uses a Kinesis data
    *     stream as a source.
@@ -4537,7 +5060,7 @@ export interface DeliveryStreamDescription {
   DeliveryStreamType: DeliveryStreamType | undefined;

   /**
-   * Each time the destination is updated for a delivery stream, the version ID is
+   * Each time the destination is updated for a Firehose stream, the version ID is
    * changed, and the current version ID is required when updating the destination. This is so
    * that the service knows it is applying the changes to the correct version of the delivery
    * stream.
    * @public
    */
   VersionId: string | undefined;

   /**
-   * The date and time that the delivery stream was created.
+   * The date and time that the Firehose stream was created.
    * @public
    */
   CreateTimestamp?: Date;

   /**
-   * The date and time that the delivery stream was last updated.
+   * The date and time that the Firehose stream was last updated.
    * @public
    */
   LastUpdateTimestamp?: Date;
@@ -4583,20 +5106,20 @@ export interface DeliveryStreamDescription {
  */
 export interface DescribeDeliveryStreamInput {
   /**

-   * The name of the delivery stream.
+   * The name of the Firehose stream.
    * @public
    */
   DeliveryStreamName: string | undefined;

   /**
    * The limit on the number of destinations to return. You can have one destination per
-   * delivery stream.
+   * Firehose stream.
    * @public
    */
   Limit?: number;

   /**
-   * The ID of the destination to start returning the destination information. Firehose supports one destination per delivery stream.
+   * The ID of the destination to start returning the destination information. Firehose supports one destination per Firehose stream.
    * @public
    */
   ExclusiveStartDestinationId?: string;
@@ -4607,7 +5130,7 @@
  */
 export interface DescribeDeliveryStreamOutput {
   /**
-   * Information about the delivery stream.
+   * Information about the Firehose stream.
    * @public
    */
   DeliveryStreamDescription: DeliveryStreamDescription | undefined;
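// CreateDeliveryStream returns before the stream is usable, so callers commonly
// poll DescribeDeliveryStream until the status leaves CREATING. A minimal
// sketch; the 10-second interval is an arbitrary choice.
import { FirehoseClient, DescribeDeliveryStreamCommand } from "@aws-sdk/client-firehose";

async function waitUntilActive(client: FirehoseClient, name: string): Promise<void> {
  for (;;) {
    const { DeliveryStreamDescription: desc } = await client.send(
      new DescribeDeliveryStreamCommand({ DeliveryStreamName: name })
    );
    if (desc?.DeliveryStreamStatus === "ACTIVE") return;
    if (desc?.DeliveryStreamStatus === "CREATING_FAILED") {
      // CREATING_FAILED never changes; the stream can only be deleted.
      throw new Error(desc.FailureDescription?.Details ?? "stream creation failed");
    }
    await new Promise((resolve) => setTimeout(resolve, 10_000));
  }
}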

@@ -4655,9 +5178,9 @@ export interface ElasticsearchDestinationUpdate {
   /**
    * The Elasticsearch type name. For Elasticsearch 6.x, there can be only one type per
    * index. If you try to specify a new type for an existing index that already has another
    * type, Firehose returns an error during runtime.
-   * If you upgrade Elasticsearch from 6.x to 7.x and don’t update your delivery stream,
+   * If you upgrade Elasticsearch from 6.x to 7.x and don’t update your Firehose stream,
    * Firehose still delivers data to Elasticsearch with the old index name and type
-   * name. If you want to update your delivery stream with a new index name, provide an empty
+   * name. If you want to update your Firehose stream with a new index name, provide an empty
    * string for TypeName.
    * @public
    */
@@ -4699,7 +5222,7 @@ export interface ElasticsearchDestinationUpdate {
   ProcessingConfiguration?: ProcessingConfiguration;

   /**
-   * The CloudWatch logging options for your delivery stream.
+   * The CloudWatch logging options for your Firehose stream.
    * @public
    */
   CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
@@ -4769,7 +5292,7 @@ export interface ExtendedS3DestinationUpdate {
   EncryptionConfiguration?: EncryptionConfiguration;

   /**
-   * The Amazon CloudWatch logging options for your delivery stream.
+   * The Amazon CloudWatch logging options for your Firehose stream.
    * @public
    */
   CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
@@ -4781,8 +5304,8 @@ export interface ExtendedS3DestinationUpdate {
   ProcessingConfiguration?: ProcessingConfiguration;

   /**
-   * You can update a delivery stream to enable Amazon S3 backup if it is disabled. If
-   * backup is enabled, you can't update the delivery stream to disable it.
+   * You can update a Firehose stream to enable Amazon S3 backup if it is disabled. If
+   * backup is enabled, you can't update the Firehose stream to disable it.
    * @public
    */
   S3BackupMode?: S3BackupMode;
@@ -4827,34 +5350,34 @@
  */
 export interface ListDeliveryStreamsInput {
   /**

-   * The maximum number of delivery streams to list. The default value is 10.
+   * The maximum number of Firehose streams to list. The default value is 10.
    * @public
    */
   Limit?: number;

   /**
-   * The delivery stream type. This can be one of the following values:
-   *   • DirectPut: Provider applications access the delivery stream
+   * The Firehose stream type. This can be one of the following values:
+   *   • DirectPut: Provider applications access the Firehose stream
    *     directly.
-   *   • KinesisStreamAsSource: The delivery stream uses a Kinesis data
+   *   • KinesisStreamAsSource: The Firehose stream uses a Kinesis data
    *     stream as a source.
-   * This parameter is optional. If this parameter is omitted, delivery streams of all
+   * This parameter is optional. If this parameter is omitted, Firehose streams of all
    * types are returned.
    * @public
    */
   DeliveryStreamType?: DeliveryStreamType;

   /**
-   * The list of delivery streams returned by this call to
-   * ListDeliveryStreams will start with the delivery stream whose name comes
+   * The list of Firehose streams returned by this call to
+   * ListDeliveryStreams will start with the Firehose stream whose name comes
    * alphabetically immediately after the name you specify in
    * ExclusiveStartDeliveryStreamName.
    * @public
@@ -4867,13 +5390,13 @@
  */
 export interface ListDeliveryStreamsOutput {
   /**
-   * The names of the delivery streams.
+   * The names of the Firehose streams.
    * @public
    */
   DeliveryStreamNames: string[] | undefined;

   /**
-   * Indicates whether there are more delivery streams available to list.
+   * Indicates whether there are more Firehose streams available to list.
    * @public
    */
   HasMoreDeliveryStreams: boolean | undefined;
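// ListDeliveryStreams pages by name rather than by token: feed the last name
// returned back in as ExclusiveStartDeliveryStreamName while
// HasMoreDeliveryStreams stays true. A minimal sketch.
import { FirehoseClient, ListDeliveryStreamsCommand } from "@aws-sdk/client-firehose";

async function listAllStreamNames(client: FirehoseClient): Promise<string[]> {
  const names: string[] = [];
  let exclusiveStart: string | undefined;
  for (;;) {
    const page = await client.send(
      new ListDeliveryStreamsCommand({ Limit: 10, ExclusiveStartDeliveryStreamName: exclusiveStart })
    );
    names.push(...(page.DeliveryStreamNames ?? []));
    if (!page.HasMoreDeliveryStreams || names.length === 0) return names;
    exclusiveStart = names[names.length - 1];
  }
}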

@@ -4884,7 +5407,7 @@
  */
 export interface ListTagsForDeliveryStreamInput {
   /**
-   * The name of the delivery stream whose tags you want to list.
+   * The name of the Firehose stream whose tags you want to list.
    * @public
    */
   DeliveryStreamName: string | undefined;
@@ -4899,7 +5422,7 @@ export interface ListTagsForDeliveryStreamInput {
   /**
    * The number of tags to return. If this number is less than the total number of tags
-   * associated with the delivery stream, HasMoreTags is set to true
+   * associated with the Firehose stream, HasMoreTags is set to true
    * in the response. To list additional tags, set ExclusiveStartTagKey to the last
    * key in the response.
    * @public
@@ -4951,7 +5474,7 @@ export class InvalidSourceException extends __BaseException {
 }

 /**

- * The unit of data in a delivery stream.
+ * The unit of data in a Firehose stream.
  * @public
  */
 export interface _Record {
@@ -4968,7 +5491,7 @@
  */
 export interface PutRecordInput {
   /**
-   * The name of the delivery stream.
+   * The name of the Firehose stream.
    * @public
    */
   DeliveryStreamName: string | undefined;
@@ -4999,7 +5522,7 @@ export interface PutRecordOutput {

 /**
  * The service is unavailable. Back off and retry the operation. If you continue to see
- * the exception, throughput limits for the delivery stream may have been exceeded. For more
+ * the exception, throughput limits for the Firehose stream may have been exceeded. For more
  * information about limits and how to request an increase, see Amazon Firehose
  * Limits.
  * @public
@@ -5025,7 +5548,7 @@
  */
 export interface PutRecordBatchInput {
   /**
-   * The name of the delivery stream.
+   * The name of the Firehose stream.
    * @public
    */
   DeliveryStreamName: string | undefined;
@@ -5039,8 +5562,8 @@

 /**
  * Contains the result for an individual record from a PutRecordBatch
- * request. If the record is successfully added to your delivery stream, it receives a record
- * ID. If the record fails to be added to your delivery stream, the result includes an error
+ * request. If the record is successfully added to your Firehose stream, it receives a record
+ * ID. If the record fails to be added to your Firehose stream, the result includes an error
  * code and an error message.
  * @public
  */
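// PutRecordBatch is not atomic: a non-zero FailedPutCount means some entries
// failed, and RequestResponses is positional. A sketch that retries only the
// failed slots; production code would add backoff between attempts.
import { FirehoseClient, PutRecordBatchCommand, _Record } from "@aws-sdk/client-firehose";

async function putAll(client: FirehoseClient, stream: string, records: _Record[]): Promise<void> {
  let pending = records;
  while (pending.length > 0) {
    const out = await client.send(
      new PutRecordBatchCommand({ DeliveryStreamName: stream, Records: pending })
    );
    if (!out.FailedPutCount) return;
    // Keep only the records whose response slot carries an error code.
    pending = pending.filter((_, i) => out.RequestResponses?.[i]?.ErrorCode != null);
  }
}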

@@ -5096,7 +5619,7 @@
  */
 export interface StartDeliveryStreamEncryptionInput {
   /**
-   * The name of the delivery stream for which you want to enable server-side encryption
+   * The name of the Firehose stream for which you want to enable server-side encryption
    * (SSE).
    * @public
@@ -5120,7 +5643,7 @@ export interface StartDeliveryStreamEncryptionOutput {}
  */
 export interface StopDeliveryStreamEncryptionInput {
   /**
-   * The name of the delivery stream for which you want to disable server-side encryption
+   * The name of the Firehose stream for which you want to disable server-side encryption
    * (SSE).
    * @public
@@ -5137,7 +5660,7 @@ export interface StopDeliveryStreamEncryptionOutput {}
  */
 export interface TagDeliveryStreamInput {
   /**
-   * The name of the delivery stream to which you want to add the tags.
+   * The name of the Firehose stream to which you want to add the tags.
    * @public
    */
   DeliveryStreamName: string | undefined;
@@ -5159,7 +5682,7 @@ export interface TagDeliveryStreamOutput {}
  */
 export interface UntagDeliveryStreamInput {
   /**
-   * The name of the delivery stream.
+   * The name of the Firehose stream.
    * @public
    */
   DeliveryStreamName: string | undefined;
@@ -5199,7 +5722,7 @@ export interface HttpEndpointDestinationUpdate {
   BufferingHints?: HttpEndpointBufferingHints;

   /**
-   * Describes the Amazon CloudWatch logging options for your delivery stream.
+   * Describes the Amazon CloudWatch logging options for your Firehose stream.
    * @public
    */
   CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
@@ -5260,18 +5783,32 @@
 /**

    * Describes an update for a destination in Apache Iceberg Tables. *

    - *

    Amazon Data Firehose is in preview release and is subject to change.

    * @public */ export interface IcebergDestinationUpdate { /** *

    Provides a list of DestinationTableConfigurations which Firehose uses - * to deliver data to Apache Iceberg tables.

    - *

    Amazon Data Firehose is in preview release and is subject to change.

+ * to deliver data to Apache Iceberg Tables. Firehose writes data with insert operations if a table-specific configuration is not provided here.

    * @public */ DestinationTableConfigurationList?: DestinationTableConfiguration[]; + /** + *

    + *

    + *

    Amazon Data Firehose is in preview release and is subject to change.

    + * @public + */ + SchemaEvolutionConfiguration?: SchemaEvolutionConfiguration; + + /** + *

    + *

    + *

    Amazon Data Firehose is in preview release and is subject to change.

    + * @public + */ + TableCreationConfiguration?: TableCreationConfiguration; + /** *

    Describes hints for the buffering to perform before delivering data to the * destination. These options are treated as hints, and therefore Firehose might @@ -5283,7 +5820,7 @@ export interface IcebergDestinationUpdate { BufferingHints?: BufferingHints; /** - *

    Describes the Amazon CloudWatch logging options for your delivery stream.

    + *

    Describes the Amazon CloudWatch logging options for your Firehose stream.

    * @public */ CloudWatchLoggingOptions?: CloudWatchLoggingOptions; @@ -5296,24 +5833,21 @@ export interface IcebergDestinationUpdate { /** *

Describes how Firehose will back up records. Currently, Firehose only supports - * FailedDataOnly for preview.

    - *

    Amazon Data Firehose is in preview release and is subject to change.

    + * FailedDataOnly.

    * @public */ S3BackupMode?: IcebergS3BackupMode; /** - *

    The retry behavior in case Firehose is unable to deliver data to an Amazon - * S3 prefix.

    + *

    The retry behavior in case Firehose is unable to deliver data to a destination.

    * @public */ RetryOptions?: RetryOptions; /** *

    - * The Amazon Resource Name (ARN) of the Apache Iceberg Tables role. + * The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling Apache Iceberg Tables. *

    - *

    Amazon Data Firehose is in preview release and is subject to change.

    * @public */ RoleARN?: string; @@ -5322,7 +5856,6 @@ export interface IcebergDestinationUpdate { *

    * Configuration describing where the destination Iceberg tables are persisted. *

    - *

    Amazon Data Firehose is in preview release and is subject to change.

    * @public */ CatalogConfiguration?: CatalogConfiguration; @@ -5395,8 +5928,8 @@ export interface RedshiftDestinationUpdate { ProcessingConfiguration?: ProcessingConfiguration; /** - *

    You can update a delivery stream to enable Amazon S3 backup if it is disabled. If - * backup is enabled, you can't update the delivery stream to disable it.

    + *

    You can update a Firehose stream to enable Amazon S3 backup if it is disabled. If + * backup is enabled, you can't update the Firehose stream to disable it.

    * @public */ S3BackupMode?: RedshiftS3BackupMode; @@ -5408,7 +5941,7 @@ export interface RedshiftDestinationUpdate { S3BackupUpdate?: S3DestinationUpdate; /** - *

    The Amazon CloudWatch logging options for your delivery stream.

    + *

    The Amazon CloudWatch logging options for your Firehose stream.

    * @public */ CloudWatchLoggingOptions?: CloudWatchLoggingOptions; @@ -5495,7 +6028,7 @@ export interface SnowflakeDestinationUpdate { ContentColumnName?: string; /** - *

    Describes the Amazon CloudWatch logging options for your delivery stream.

    + *

    Describes the Amazon CloudWatch logging options for your Firehose stream.

    * @public */ CloudWatchLoggingOptions?: CloudWatchLoggingOptions; @@ -5527,7 +6060,8 @@ export interface SnowflakeDestinationUpdate { RetryOptions?: SnowflakeRetryOptions; /** - *

    Choose an S3 backup mode

    + *

Choose an S3 backup mode. Once you set the mode as AllData, you cannot + * change it to FailedDataOnly.

    * @public */ S3BackupMode?: SnowflakeS3BackupMode; @@ -5622,7 +6156,7 @@ export interface SplunkDestinationUpdate { ProcessingConfiguration?: ProcessingConfiguration; /** - *

    The Amazon CloudWatch logging options for your delivery stream.

    + *

    The Amazon CloudWatch logging options for your Firehose stream.

    * @public */ CloudWatchLoggingOptions?: CloudWatchLoggingOptions; @@ -5647,7 +6181,7 @@ export interface SplunkDestinationUpdate { */ export interface UpdateDestinationInput { /** - *

    The name of the delivery stream.

    + *

    The name of the Firehose stream.

    * @public */ DeliveryStreamName: string | undefined; @@ -5729,7 +6263,6 @@ export interface UpdateDestinationInput { *

    * Describes an update for a destination in Apache Iceberg Tables. *

    - *

    Amazon Data Firehose is in preview release and is subject to change.

    * @public */ IcebergDestinationUpdate?: IcebergDestinationUpdate; @@ -5938,9 +6471,6 @@ export const DeliveryStreamDescriptionFilterSensitiveLog = (obj: DeliveryStreamD */ export const DescribeDeliveryStreamOutputFilterSensitiveLog = (obj: DescribeDeliveryStreamOutput): any => ({ ...obj, - ...(obj.DeliveryStreamDescription && { - DeliveryStreamDescription: DeliveryStreamDescriptionFilterSensitiveLog(obj.DeliveryStreamDescription), - }), }); /** diff --git a/clients/client-firehose/src/protocols/Aws_json1_1.ts b/clients/client-firehose/src/protocols/Aws_json1_1.ts index 8aab5b5a4090..e1c5cfd11409 100644 --- a/clients/client-firehose/src/protocols/Aws_json1_1.ts +++ b/clients/client-firehose/src/protocols/Aws_json1_1.ts @@ -77,6 +77,14 @@ import { ConcurrentModificationException, CopyCommand, CreateDeliveryStreamInput, + DatabaseColumnList, + DatabaseList, + DatabaseSnapshotInfo, + DatabaseSourceAuthenticationConfiguration, + DatabaseSourceConfiguration, + DatabaseSourceDescription, + DatabaseSourceVPCConfiguration, + DatabaseTableList, DataFormatConversionConfiguration, DeleteDeliveryStreamInput, DeliveryStreamDescription, @@ -122,6 +130,8 @@ import { OrcSerDe, OutputFormatConfiguration, ParquetSerDe, + PartitionField, + PartitionSpec, ProcessingConfiguration, Processor, ProcessorParameter, @@ -136,6 +146,7 @@ import { S3DestinationConfiguration, S3DestinationUpdate, SchemaConfiguration, + SchemaEvolutionConfiguration, SecretsManagerConfiguration, Serializer, ServiceUnavailableException, @@ -152,6 +163,7 @@ import { SplunkRetryOptions, StartDeliveryStreamEncryptionInput, StopDeliveryStreamEncryptionInput, + TableCreationConfiguration, Tag, TagDeliveryStreamInput, UntagDeliveryStreamInput, @@ -762,6 +774,7 @@ const se_CreateDeliveryStreamInput = (input: CreateDeliveryStreamInput, context: return take(input, { AmazonOpenSearchServerlessDestinationConfiguration: _json, AmazonopensearchserviceDestinationConfiguration: _json, + DatabaseSourceConfiguration: _json, DeliveryStreamEncryptionConfigurationInput: _json, DeliveryStreamName: [], DeliveryStreamType: [], @@ -779,6 +792,26 @@ const se_CreateDeliveryStreamInput = (input: CreateDeliveryStreamInput, context: }); }; +// se_DatabaseColumnIncludeOrExcludeList omitted. + +// se_DatabaseColumnList omitted. + +// se_DatabaseIncludeOrExcludeList omitted. + +// se_DatabaseList omitted. + +// se_DatabaseSourceAuthenticationConfiguration omitted. + +// se_DatabaseSourceConfiguration omitted. + +// se_DatabaseSourceVPCConfiguration omitted. + +// se_DatabaseSurrogateKeyList omitted. + +// se_DatabaseTableIncludeOrExcludeList omitted. + +// se_DatabaseTableList omitted. + /** * serializeAws_json1_1DataFormatConversionConfiguration */ @@ -948,6 +981,12 @@ const se_OutputFormatConfiguration = (input: OutputFormatConfiguration, context: // se_ParquetSerDe omitted. +// se_PartitionField omitted. + +// se_PartitionFields omitted. + +// se_PartitionSpec omitted. + // se_ProcessingConfiguration omitted. // se_Processor omitted. @@ -1012,6 +1051,8 @@ const se__Record = (input: _Record, context: __SerdeContext): any => { // se_SchemaConfiguration omitted. +// se_SchemaEvolutionConfiguration omitted. + // se_SecretsManagerConfiguration omitted. // se_SecurityGroupIdList omitted. @@ -1052,6 +1093,8 @@ const se_Serializer = (input: Serializer, context: __SerdeContext): any => { // se_SubnetIdList omitted. +// se_TableCreationConfiguration omitted. + // se_Tag omitted. // se_TagDeliveryStreamInput omitted. 
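Putting IcebergDestinationUpdate to use requires the stream's current version ID and destination ID from DescribeDeliveryStream; a hedged sketch (the stream name is a placeholder, the stream is assumed to already target Iceberg, and the preview shapes above may still change):

```ts
import {
  FirehoseClient,
  DescribeDeliveryStreamCommand,
  UpdateDestinationCommand,
} from "@aws-sdk/client-firehose";

const client = new FirehoseClient({});

// UpdateDestination needs the current version ID and destination ID, both of
// which come from DescribeDeliveryStream.
const desc = await client.send(
  new DescribeDeliveryStreamCommand({ DeliveryStreamName: "iceberg-stream" })
);
const d = desc.DeliveryStreamDescription!;
await client.send(
  new UpdateDestinationCommand({
    DeliveryStreamName: "iceberg-stream",
    CurrentDeliveryStreamVersionId: d.VersionId!,
    DestinationId: d.Destinations![0].DestinationId!,
    IcebergDestinationUpdate: {
      // Preview fields introduced by this change; shapes may evolve.
      SchemaEvolutionConfiguration: { Enabled: true },
      TableCreationConfiguration: { Enabled: true },
    },
  })
);
```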
@@ -1113,6 +1156,68 @@ const se_UpdateDestinationInput = (input: UpdateDestinationInput, context: __Ser // de_CreateDeliveryStreamOutput omitted. +// de_DatabaseColumnIncludeOrExcludeList omitted. + +// de_DatabaseColumnList omitted. + +// de_DatabaseIncludeOrExcludeList omitted. + +// de_DatabaseList omitted. + +/** + * deserializeAws_json1_1DatabaseSnapshotInfo + */ +const de_DatabaseSnapshotInfo = (output: any, context: __SerdeContext): DatabaseSnapshotInfo => { + return take(output, { + FailureDescription: _json, + Id: __expectString, + RequestTimestamp: (_: any) => __expectNonNull(__parseEpochTimestamp(__expectNumber(_))), + RequestedBy: __expectString, + Status: __expectString, + Table: __expectString, + }) as any; +}; + +/** + * deserializeAws_json1_1DatabaseSnapshotInfoList + */ +const de_DatabaseSnapshotInfoList = (output: any, context: __SerdeContext): DatabaseSnapshotInfo[] => { + const retVal = (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + return de_DatabaseSnapshotInfo(entry, context); + }); + return retVal; +}; + +// de_DatabaseSourceAuthenticationConfiguration omitted. + +/** + * deserializeAws_json1_1DatabaseSourceDescription + */ +const de_DatabaseSourceDescription = (output: any, context: __SerdeContext): DatabaseSourceDescription => { + return take(output, { + Columns: _json, + DatabaseSourceAuthenticationConfiguration: _json, + DatabaseSourceVPCConfiguration: _json, + Databases: _json, + Endpoint: __expectString, + Port: __expectInt32, + SSLMode: __expectString, + SnapshotInfo: (_: any) => de_DatabaseSnapshotInfoList(_, context), + SnapshotWatermarkTable: __expectString, + SurrogateKeys: _json, + Tables: _json, + Type: __expectString, + }) as any; +}; + +// de_DatabaseSourceVPCConfiguration omitted. + +// de_DatabaseTableIncludeOrExcludeList omitted. + +// de_DatabaseTableList omitted. + /** * deserializeAws_json1_1DataFormatConversionConfiguration */ @@ -1335,6 +1440,12 @@ const de_OutputFormatConfiguration = (output: any, context: __SerdeContext): Out // de_ParquetSerDe omitted. +// de_PartitionField omitted. + +// de_PartitionFields omitted. + +// de_PartitionSpec omitted. + // de_ProcessingConfiguration omitted. // de_Processor omitted. @@ -1367,6 +1478,8 @@ const de_OutputFormatConfiguration = (output: any, context: __SerdeContext): Out // de_SchemaConfiguration omitted. +// de_SchemaEvolutionConfiguration omitted. + // de_SecretsManagerConfiguration omitted. // de_SecurityGroupIdList omitted. @@ -1398,6 +1511,7 @@ const de_Serializer = (output: any, context: __SerdeContext): Serializer => { */ const de_SourceDescription = (output: any, context: __SerdeContext): SourceDescription => { return take(output, { + DatabaseSourceDescription: (_: any) => de_DatabaseSourceDescription(_, context), KinesisStreamSourceDescription: (_: any) => de_KinesisStreamSourceDescription(_, context), MSKSourceDescription: (_: any) => de_MSKSourceDescription(_, context), }) as any; @@ -1415,6 +1529,8 @@ const de_SourceDescription = (output: any, context: __SerdeContext): SourceDescr // de_SubnetIdList omitted. +// de_TableCreationConfiguration omitted. + // de_Tag omitted. // de_TagDeliveryStreamOutput omitted. 
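With the deserializers above in place, DescribeDeliveryStream can surface the database source on a DatabaseAsSource stream; a sketch of reading per-table snapshot progress (the stream name is a placeholder, and these are preview shapes):

```ts
import { FirehoseClient, DescribeDeliveryStreamCommand } from "@aws-sdk/client-firehose";

const client = new FirehoseClient({});

// For a DatabaseAsSource stream, Source.DatabaseSourceDescription carries the
// endpoint plus per-table snapshot progress (DatabaseSnapshotInfoList).
const out = await client.send(
  new DescribeDeliveryStreamCommand({ DeliveryStreamName: "db-stream" }) // placeholder
);
const dbSource = out.DeliveryStreamDescription?.Source?.DatabaseSourceDescription;
for (const snap of dbSource?.SnapshotInfo ?? []) {
  console.log(`${snap.Table}: ${snap.Status} (requested at ${snap.RequestTimestamp})`);
}
```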
diff --git a/codegen/sdk-codegen/aws-models/firehose.json b/codegen/sdk-codegen/aws-models/firehose.json index 16788d985047..f9047c401c06 100644 --- a/codegen/sdk-codegen/aws-models/firehose.json +++ b/codegen/sdk-codegen/aws-models/firehose.json @@ -36,7 +36,7 @@ "min": 1, "max": 512 }, - "smithy.api#pattern": "^arn:" + "smithy.api#pattern": "^arn:.*:kms:[a-zA-Z0-9\\-]+:\\d{12}:(key|alias)/[a-zA-Z_0-9+=,.@\\-_/]+$" } }, "com.amazonaws.firehose#AmazonOpenSearchServerlessBufferingHints": { @@ -51,7 +51,7 @@ "SizeInMBs": { "target": "com.amazonaws.firehose#AmazonOpenSearchServerlessBufferingSizeInMBs", "traits": { - "smithy.api#documentation": "

    Buffer incoming data to the specified size, in MBs, before delivering it to the\n destination. The default value is 5.

    \n

    We recommend setting this parameter to a value greater than the amount of data you\n typically ingest into the delivery stream in 10 seconds. For example, if you typically\n ingest data at 1 MB/sec, the value should be 10 MB or higher.

    " + "smithy.api#documentation": "

    Buffer incoming data to the specified size, in MBs, before delivering it to the\n destination. The default value is 5.

    \n

    We recommend setting this parameter to a value greater than the amount of data you\n typically ingest into the Firehose stream in 10 seconds. For example, if you typically\n ingest data at 1 MB/sec, the value should be 10 MB or higher.

    " } } }, @@ -313,7 +313,7 @@ "SizeInMBs": { "target": "com.amazonaws.firehose#AmazonopensearchserviceBufferingSizeInMBs", "traits": { - "smithy.api#documentation": "

    Buffer incoming data to the specified size, in MBs, before delivering it to the\n destination. The default value is 5.

    \n

    We recommend setting this parameter to a value greater than the amount of data you\n typically ingest into the delivery stream in 10 seconds. For example, if you typically\n ingest data at 1 MB/sec, the value should be 10 MB or higher.

    " + "smithy.api#documentation": "

    Buffer incoming data to the specified size, in MBs, before delivering it to the\n destination. The default value is 5.

    \n

    We recommend setting this parameter to a value greater than the amount of data you\n typically ingest into the Firehose stream in 10 seconds. For example, if you typically\n ingest data at 1 MB/sec, the value should be 10 MB or higher.

    " } } }, @@ -544,7 +544,7 @@ "TypeName": { "target": "com.amazonaws.firehose#AmazonopensearchserviceTypeName", "traits": { - "smithy.api#documentation": "

    The Amazon OpenSearch Service type name. For Elasticsearch 6.x, there can be only one\n type per index. If you try to specify a new type for an existing index that already has\n another type, Firehose returns an error during runtime.

    \n

    If you upgrade Elasticsearch from 6.x to 7.x and don’t update your delivery stream,\n Firehose still delivers data to Elasticsearch with the old index name and type\n name. If you want to update your delivery stream with a new index name, provide an empty\n string for TypeName.

    " + "smithy.api#documentation": "

    The Amazon OpenSearch Service type name. For Elasticsearch 6.x, there can be only one\n type per index. If you try to specify a new type for an existing index that already has\n another type, Firehose returns an error during runtime.

    \n

    If you upgrade Elasticsearch from 6.x to 7.x and don’t update your Firehose stream,\n Firehose still delivers data to Elasticsearch with the old index name and type\n name. If you want to update your Firehose stream with a new index name, provide an empty\n string for TypeName.

    " } }, "IndexRotationPeriod": { @@ -592,7 +592,7 @@ "min": 1, "max": 512 }, - "smithy.api#pattern": "^arn:" + "smithy.api#pattern": "^arn:.*:es:[a-zA-Z0-9\\-]+:\\d{12}:domain/[a-z][-0-9a-z]{2,27}$" } }, "com.amazonaws.firehose#AmazonopensearchserviceIndexName": { @@ -730,7 +730,7 @@ "min": 1, "max": 2048 }, - "smithy.api#pattern": "^arn:" + "smithy.api#pattern": "^arn:.*:s3:::[\\w\\.\\-]{1,255}$" } }, "com.amazonaws.firehose#BufferingHints": { @@ -739,7 +739,7 @@ "SizeInMBs": { "target": "com.amazonaws.firehose#SizeInMBs", "traits": { - "smithy.api#documentation": "

    Buffer incoming data to the specified size, in MiBs, before delivering it to the\n destination. The default value is 5. This parameter is optional but if you specify a value\n for it, you must also specify a value for IntervalInSeconds, and vice\n versa.

    \n

    We recommend setting this parameter to a value greater than the amount of data you\n typically ingest into the delivery stream in 10 seconds. For example, if you typically\n ingest data at 1 MiB/sec, the value should be 10 MiB or higher.

    " + "smithy.api#documentation": "

    Buffer incoming data to the specified size, in MiBs, before delivering it to the\n destination. The default value is 5. This parameter is optional but if you specify a value\n for it, you must also specify a value for IntervalInSeconds, and vice\n versa.

    \n

    We recommend setting this parameter to a value greater than the amount of data you\n typically ingest into the Firehose stream in 10 seconds. For example, if you typically\n ingest data at 1 MiB/sec, the value should be 10 MiB or higher.
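Translating that guidance into the client's shape, assuming roughly 1 MiB/sec of ingest (values are illustrative):

```ts
import type { BufferingHints } from "@aws-sdk/client-firehose";

// ~1 MiB/sec * 10 s => buffer at least 10 MiB; flush at least every 5 minutes.
const bufferingHints: BufferingHints = {
  SizeInMBs: 10,
  IntervalInSeconds: 300,
};
```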

    " } }, "IntervalInSeconds": { @@ -759,12 +759,18 @@ "CatalogARN": { "target": "com.amazonaws.firehose#GlueDataCatalogARN", "traits": { - "smithy.api#documentation": "

    \n Specifies the Glue catalog ARN indentifier of the destination Apache Iceberg Tables. You must specify the ARN in the format arn:aws:glue:region:account-id:catalog.\n

    \n

    Amazon Data Firehose is in preview release and is subject to change.

    " + "smithy.api#documentation": "

    \n Specifies the Glue catalog ARN identifier of the destination Apache Iceberg Tables. You must specify the ARN in the format arn:aws:glue:region:account-id:catalog.\n

    " + } + }, + "WarehouseLocation": { + "target": "com.amazonaws.firehose#WarehouseLocation", + "traits": { + "smithy.api#documentation": "

    \n

    \n

    Amazon Data Firehose is in preview release and is subject to change.

    " } } }, "traits": { - "smithy.api#documentation": "

    \n Describes the containers where the destination Apache Iceberg Tables are persisted.\n

    \n

    Amazon Data Firehose is in preview release and is subject to change.

    " + "smithy.api#documentation": "

    \n Describes the containers where the destination Apache Iceberg Tables are persisted.\n

    " } }, "com.amazonaws.firehose#CloudWatchLoggingOptions": { @@ -790,7 +796,7 @@ } }, "traits": { - "smithy.api#documentation": "

    Describes the Amazon CloudWatch logging options for your delivery stream.

    " + "smithy.api#documentation": "

    Describes the Amazon CloudWatch logging options for your Firehose stream.

    " } }, "com.amazonaws.firehose#ClusterJDBCURL": { @@ -800,7 +806,7 @@ "min": 1, "max": 512 }, - "smithy.api#pattern": "^jdbc:(redshift|postgresql)://((?!-)[A-Za-z0-9-]{1,63}(?Creates a Firehose delivery stream.

    \n

    By default, you can create up to 50 delivery streams per Amazon Web Services\n Region.

    \n

    This is an asynchronous operation that immediately returns. The initial status of the\n delivery stream is CREATING. After the delivery stream is created, its status\n is ACTIVE and it now accepts data. If the delivery stream creation fails, the\n status transitions to CREATING_FAILED. Attempts to send data to a delivery\n stream that is not in the ACTIVE state cause an exception. To check the state\n of a delivery stream, use DescribeDeliveryStream.

    \n

    If the status of a delivery stream is CREATING_FAILED, this status\n doesn't change, and you can't invoke CreateDeliveryStream again on it.\n However, you can invoke the DeleteDeliveryStream operation to delete\n it.

    \n

    A Firehose delivery stream can be configured to receive records directly\n from providers using PutRecord or PutRecordBatch, or it\n can be configured to use an existing Kinesis stream as its source. To specify a Kinesis\n data stream as input, set the DeliveryStreamType parameter to\n KinesisStreamAsSource, and provide the Kinesis stream Amazon Resource Name\n (ARN) and role ARN in the KinesisStreamSourceConfiguration\n parameter.

    \n

    To create a delivery stream with server-side encryption (SSE) enabled, include DeliveryStreamEncryptionConfigurationInput in your request. This is\n optional. You can also invoke StartDeliveryStreamEncryption to turn on\n SSE for an existing delivery stream that doesn't have SSE enabled.

    \n

    A delivery stream is configured with a single destination, such as Amazon Simple\n Storage Service (Amazon S3), Amazon Redshift, Amazon OpenSearch Service, Amazon OpenSearch\n Serverless, Splunk, and any custom HTTP endpoint or HTTP endpoints owned by or supported by\n third-party service providers, including Datadog, Dynatrace, LogicMonitor, MongoDB, New\n Relic, and Sumo Logic. You must specify only one of the following destination configuration\n parameters: ExtendedS3DestinationConfiguration,\n S3DestinationConfiguration,\n ElasticsearchDestinationConfiguration,\n RedshiftDestinationConfiguration, or\n SplunkDestinationConfiguration.

    \n

    When you specify S3DestinationConfiguration, you can also provide the\n following optional values: BufferingHints, EncryptionConfiguration, and\n CompressionFormat. By default, if no BufferingHints value is\n provided, Firehose buffers data up to 5 MB or for 5 minutes, whichever\n condition is satisfied first. BufferingHints is a hint, so there are some\n cases where the service cannot adhere to these conditions strictly. For example, record\n boundaries might be such that the size is a little over or under the configured buffering\n size. By default, no encryption is performed. We strongly recommend that you enable\n encryption to ensure secure data storage in Amazon S3.

    \n

    A few notes about Amazon Redshift as a destination:

    \n
      \n
    • \n

      An Amazon Redshift destination requires an S3 bucket as intermediate location.\n Firehose first delivers data to Amazon S3 and then uses\n COPY syntax to load data into an Amazon Redshift table. This is\n specified in the RedshiftDestinationConfiguration.S3Configuration\n parameter.

      \n
    • \n
    • \n

      The compression formats SNAPPY or ZIP cannot be\n specified in RedshiftDestinationConfiguration.S3Configuration because\n the Amazon Redshift COPY operation that reads from the S3 bucket doesn't\n support these compression formats.

      \n
    • \n
    • \n

      We strongly recommend that you use the user name and password you provide\n exclusively with Firehose, and that the permissions for the account are\n restricted for Amazon Redshift INSERT permissions.

      \n
    • \n
    \n

    Firehose assumes the IAM role that is configured as part of the\n destination. The role should allow the Firehose principal to assume the role,\n and the role should have permissions that allow the service to deliver the data. For more\n information, see Grant Firehose Access to an Amazon S3 Destination in the Amazon Firehose Developer Guide.

    " + "smithy.api#documentation": "

    Creates a Firehose stream.

    \n

    By default, you can create up to 50 Firehose streams per Amazon Web Services\n Region.

    \n

This is an asynchronous operation that immediately returns. The initial status of the\n Firehose stream is CREATING. After the Firehose stream is created, its status\n is ACTIVE and it now accepts data. If the Firehose stream creation fails, the\n status transitions to CREATING_FAILED. Attempts to send data to a Firehose\n stream that is not in the ACTIVE state cause an exception. To check the state\n of a Firehose stream, use DescribeDeliveryStream.

    \n

    If the status of a Firehose stream is CREATING_FAILED, this status\n doesn't change, and you can't invoke CreateDeliveryStream again on it.\n However, you can invoke the DeleteDeliveryStream operation to delete\n it.

    \n

    A Firehose stream can be configured to receive records directly\n from providers using PutRecord or PutRecordBatch, or it\n can be configured to use an existing Kinesis stream as its source. To specify a Kinesis\n data stream as input, set the DeliveryStreamType parameter to\n KinesisStreamAsSource, and provide the Kinesis stream Amazon Resource Name\n (ARN) and role ARN in the KinesisStreamSourceConfiguration\n parameter.

    \n

    To create a Firehose stream with server-side encryption (SSE) enabled, include DeliveryStreamEncryptionConfigurationInput in your request. This is\n optional. You can also invoke StartDeliveryStreamEncryption to turn on\n SSE for an existing Firehose stream that doesn't have SSE enabled.

    \n

    A Firehose stream is configured with a single destination, such as Amazon Simple\n Storage Service (Amazon S3), Amazon Redshift, Amazon OpenSearch Service, Amazon OpenSearch\n Serverless, Splunk, and any custom HTTP endpoint or HTTP endpoints owned by or supported by\n third-party service providers, including Datadog, Dynatrace, LogicMonitor, MongoDB, New\n Relic, and Sumo Logic. You must specify only one of the following destination configuration\n parameters: ExtendedS3DestinationConfiguration,\n S3DestinationConfiguration,\n ElasticsearchDestinationConfiguration,\n RedshiftDestinationConfiguration, or\n SplunkDestinationConfiguration.

    \n

    When you specify S3DestinationConfiguration, you can also provide the\n following optional values: BufferingHints, EncryptionConfiguration, and\n CompressionFormat. By default, if no BufferingHints value is\n provided, Firehose buffers data up to 5 MB or for 5 minutes, whichever\n condition is satisfied first. BufferingHints is a hint, so there are some\n cases where the service cannot adhere to these conditions strictly. For example, record\n boundaries might be such that the size is a little over or under the configured buffering\n size. By default, no encryption is performed. We strongly recommend that you enable\n encryption to ensure secure data storage in Amazon S3.

    \n

    A few notes about Amazon Redshift as a destination:

    \n
      \n
    • \n

      An Amazon Redshift destination requires an S3 bucket as intermediate location.\n Firehose first delivers data to Amazon S3 and then uses\n COPY syntax to load data into an Amazon Redshift table. This is\n specified in the RedshiftDestinationConfiguration.S3Configuration\n parameter.

      \n
    • \n
    • \n

      The compression formats SNAPPY or ZIP cannot be\n specified in RedshiftDestinationConfiguration.S3Configuration because\n the Amazon Redshift COPY operation that reads from the S3 bucket doesn't\n support these compression formats.

      \n
    • \n
    • \n

      We strongly recommend that you use the user name and password you provide\n exclusively with Firehose, and that the permissions for the account are\n restricted for Amazon Redshift INSERT permissions.

      \n
    • \n
    \n

    Firehose assumes the IAM role that is configured as part of the\n destination. The role should allow the Firehose principal to assume the role,\n and the role should have permissions that allow the service to deliver the data. For more\n information, see Grant Firehose Access to an Amazon S3 Destination in the Amazon Firehose Developer Guide.
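Because the operation is asynchronous, a typical caller creates the stream and then polls DescribeDeliveryStream until it reaches ACTIVE; a minimal sketch (ARNs and names are placeholders):

```ts
import {
  FirehoseClient,
  CreateDeliveryStreamCommand,
  DescribeDeliveryStreamCommand,
} from "@aws-sdk/client-firehose";

const client = new FirehoseClient({});
const name = "my-stream"; // placeholder

await client.send(
  new CreateDeliveryStreamCommand({
    DeliveryStreamName: name,
    DeliveryStreamType: "DirectPut",
    S3DestinationConfiguration: {
      RoleARN: "arn:aws:iam::123456789012:role/firehose-role", // placeholder
      BucketARN: "arn:aws:s3:::my-bucket", // placeholder
    },
  })
);

// Poll until the stream leaves CREATING; CREATING_FAILED is terminal.
let status: string | undefined;
do {
  await new Promise((r) => setTimeout(r, 5000));
  const out = await client.send(new DescribeDeliveryStreamCommand({ DeliveryStreamName: name }));
  status = out.DeliveryStreamDescription?.DeliveryStreamStatus;
} while (status === "CREATING");
if (status !== "ACTIVE") throw new Error(`Stream ended up in ${status}`);
```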

    " } }, "com.amazonaws.firehose#CreateDeliveryStreamInput": { @@ -965,20 +971,20 @@ "DeliveryStreamName": { "target": "com.amazonaws.firehose#DeliveryStreamName", "traits": { - "smithy.api#documentation": "

    The name of the delivery stream. This name must be unique per Amazon Web Services\n account in the same Amazon Web Services Region. If the delivery streams are in different\n accounts or different Regions, you can have multiple delivery streams with the same\n name.

    ", + "smithy.api#documentation": "

    The name of the Firehose stream. This name must be unique per Amazon Web Services\n account in the same Amazon Web Services Region. If the Firehose streams are in different\n accounts or different Regions, you can have multiple Firehose streams with the same\n name.

    ", "smithy.api#required": {} } }, "DeliveryStreamType": { "target": "com.amazonaws.firehose#DeliveryStreamType", "traits": { - "smithy.api#documentation": "

    The delivery stream type. This parameter can be one of the following\n values:

    \n
      \n
    • \n

      \n DirectPut: Provider applications access the delivery stream\n directly.

      \n
    • \n
    • \n

      \n KinesisStreamAsSource: The delivery stream uses a Kinesis data\n stream as a source.

      \n
    • \n
    " + "smithy.api#documentation": "

    The Firehose stream type. This parameter can be one of the following\n values:

    \n
      \n
    • \n

      \n DirectPut: Provider applications access the Firehose stream\n directly.

      \n
    • \n
    • \n

      \n KinesisStreamAsSource: The Firehose stream uses a Kinesis data\n stream as a source.

      \n
    • \n
    " } }, "KinesisStreamSourceConfiguration": { "target": "com.amazonaws.firehose#KinesisStreamSourceConfiguration", "traits": { - "smithy.api#documentation": "

    When a Kinesis data stream is used as the source for the delivery stream, a KinesisStreamSourceConfiguration containing the Kinesis data stream Amazon\n Resource Name (ARN) and the role ARN for the source stream.

    " + "smithy.api#documentation": "

    When a Kinesis data stream is used as the source for the Firehose stream, a KinesisStreamSourceConfiguration containing the Kinesis data stream Amazon\n Resource Name (ARN) and the role ARN for the source stream.

    " } }, "DeliveryStreamEncryptionConfigurationInput": { @@ -1033,7 +1039,7 @@ "Tags": { "target": "com.amazonaws.firehose#TagDeliveryStreamInputTagList", "traits": { - "smithy.api#documentation": "

    A set of tags to assign to the delivery stream. A tag is a key-value pair that you can\n define and assign to Amazon Web Services resources. Tags are metadata. For example, you can\n add friendly names and descriptions or other types of information that can help you\n distinguish the delivery stream. For more information about tags, see Using\n Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User\n Guide.

    \n

    You can specify up to 50 tags when creating a delivery stream.

    \n

    If you specify tags in the CreateDeliveryStream action, Amazon Data\n Firehose performs an additional authorization on the\n firehose:TagDeliveryStream action to verify if users have permissions to\n create tags. If you do not provide this permission, requests to create new Firehose\n delivery streams with IAM resource tags will fail with an\n AccessDeniedException such as following.

    \n

    \n AccessDeniedException\n

    \n

    User: arn:aws:sts::x:assumed-role/x/x is not authorized to perform: firehose:TagDeliveryStream on resource: arn:aws:firehose:us-east-1:x:deliverystream/x with an explicit deny in an identity-based policy.

    \n

    For an example IAM policy, see Tag example.\n

    " + "smithy.api#documentation": "

    A set of tags to assign to the Firehose stream. A tag is a key-value pair that you can\n define and assign to Amazon Web Services resources. Tags are metadata. For example, you can\n add friendly names and descriptions or other types of information that can help you\n distinguish the Firehose stream. For more information about tags, see Using\n Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User\n Guide.

    \n

    You can specify up to 50 tags when creating a Firehose stream.

    \n

If you specify tags in the CreateDeliveryStream action, Amazon Data\n Firehose performs an additional authorization on the\n firehose:TagDeliveryStream action to verify if users have permissions to\n create tags. If you do not provide this permission, requests to create new Firehose\n streams with IAM resource tags will fail with an\n AccessDeniedException such as the following.

    \n

    \n AccessDeniedException\n

    \n

    User: arn:aws:sts::x:assumed-role/x/x is not authorized to perform: firehose:TagDeliveryStream on resource: arn:aws:firehose:us-east-1:x:deliverystream/x with an explicit deny in an identity-based policy.

    \n

    For an example IAM policy, see Tag example.\n
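Tags can also be attached after creation, in which case only firehose:TagDeliveryStream is checked; a sketch with a placeholder stream and tag:

```ts
import { FirehoseClient, TagDeliveryStreamCommand } from "@aws-sdk/client-firehose";

const client = new FirehoseClient({});

// Add a tag to an existing stream (up to 50 tags per stream).
await client.send(
  new TagDeliveryStreamCommand({
    DeliveryStreamName: "my-stream", // placeholder
    Tags: [{ Key: "team", Value: "data-platform" }], // placeholder tag
  })
);
```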

    " } }, "AmazonOpenSearchServerlessDestinationConfiguration": { @@ -1054,7 +1060,13 @@ "IcebergDestinationConfiguration": { "target": "com.amazonaws.firehose#IcebergDestinationConfiguration", "traits": { - "smithy.api#documentation": "

    \n Configure Apache Iceberg Tables destination.\n

    \n

    Amazon Data Firehose is in preview release and is subject to change.

    " + "smithy.api#documentation": "

    \n Configure Apache Iceberg Tables destination.\n

    " + } + }, + "DatabaseSourceConfiguration": { + "target": "com.amazonaws.firehose#DatabaseSourceConfiguration", + "traits": { + "smithy.api#documentation": "

    \n

    \n

    Amazon Data Firehose is in preview release and is subject to change.

    " } } }, @@ -1068,7 +1080,7 @@ "DeliveryStreamARN": { "target": "com.amazonaws.firehose#DeliveryStreamARN", "traits": { - "smithy.api#documentation": "

    The ARN of the delivery stream.

    " + "smithy.api#documentation": "

    The ARN of the Firehose stream.

    " } } }, @@ -1082,7 +1094,8 @@ "smithy.api#length": { "min": 0, "max": 50 - } + }, + "smithy.api#pattern": "^$|[a-zA-Z/_]+$" } }, "com.amazonaws.firehose#Data": { @@ -1131,7 +1144,7 @@ "traits": { "smithy.api#length": { "min": 0, - "max": 204800 + "max": 10240 }, "smithy.api#pattern": ".*" } @@ -1146,6 +1159,399 @@ "smithy.api#pattern": ".*" } }, + "com.amazonaws.firehose#DatabaseColumnIncludeOrExcludeList": { + "type": "list", + "member": { + "target": "com.amazonaws.firehose#DatabaseColumnName" + } + }, + "com.amazonaws.firehose#DatabaseColumnList": { + "type": "structure", + "members": { + "Include": { + "target": "com.amazonaws.firehose#DatabaseColumnIncludeOrExcludeList", + "traits": { + "smithy.api#documentation": "

    \n

    \n

    Amazon Data Firehose is in preview release and is subject to change.

    " + } + }, + "Exclude": { + "target": "com.amazonaws.firehose#DatabaseColumnIncludeOrExcludeList", + "traits": { + "smithy.api#documentation": "

    \n

    \n

    Amazon Data Firehose is in preview release and is subject to change.

    " + } + } + }, + "traits": { + "smithy.api#documentation": "

    \n

    \n

    Amazon Data Firehose is in preview release and is subject to change.

    " + } + }, + "com.amazonaws.firehose#DatabaseColumnName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 194 + }, + "smithy.api#pattern": "^[\\u0001-\\uFFFF]*$" + } + }, + "com.amazonaws.firehose#DatabaseEndpoint": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 255 + }, + "smithy.api#pattern": "^(?!\\s*$).+$" + } + }, + "com.amazonaws.firehose#DatabaseIncludeOrExcludeList": { + "type": "list", + "member": { + "target": "com.amazonaws.firehose#DatabaseName" + } + }, + "com.amazonaws.firehose#DatabaseList": { + "type": "structure", + "members": { + "Include": { + "target": "com.amazonaws.firehose#DatabaseIncludeOrExcludeList", + "traits": { + "smithy.api#documentation": "

    \n

    \n

    Amazon Data Firehose is in preview release and is subject to change.

    " + } + }, + "Exclude": { + "target": "com.amazonaws.firehose#DatabaseIncludeOrExcludeList", + "traits": { + "smithy.api#documentation": "

    \n

    \n

    Amazon Data Firehose is in preview release and is subject to change.

    " + } + } + }, + "traits": { + "smithy.api#documentation": "

    \n

    \n

    Amazon Data Firehose is in preview release and is subject to change.

    " + } + }, + "com.amazonaws.firehose#DatabaseName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 64 + }, + "smithy.api#pattern": "^[\\u0001-\\uFFFF]*$" + } + }, + "com.amazonaws.firehose#DatabasePort": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 0, + "max": 65535 + } + } + }, + "com.amazonaws.firehose#DatabaseSnapshotInfo": { + "type": "structure", + "members": { + "Id": { + "target": "com.amazonaws.firehose#NonEmptyStringWithoutWhitespace", + "traits": { + "smithy.api#documentation": "

    \n

    \n

    Amazon Data Firehose is in preview release and is subject to change.

    ", + "smithy.api#required": {} + } + }, + "Table": { + "target": "com.amazonaws.firehose#DatabaseTableName", + "traits": { + "smithy.api#documentation": "

    \n

    \n

    Amazon Data Firehose is in preview release and is subject to change.

    ", + "smithy.api#required": {} + } + }, + "RequestTimestamp": { + "target": "com.amazonaws.firehose#Timestamp", + "traits": { + "smithy.api#documentation": "

    \n

    \n

    Amazon Data Firehose is in preview release and is subject to change.

    ", + "smithy.api#required": {} + } + }, + "RequestedBy": { + "target": "com.amazonaws.firehose#SnapshotRequestedBy", + "traits": { + "smithy.api#documentation": "

    \n

    \n

    Amazon Data Firehose is in preview release and is subject to change.

    ", + "smithy.api#required": {} + } + }, + "Status": { + "target": "com.amazonaws.firehose#SnapshotStatus", + "traits": { + "smithy.api#documentation": "

    \n

    \n

    Amazon Data Firehose is in preview release and is subject to change.

    ", + "smithy.api#required": {} + } + }, + "FailureDescription": { + "target": "com.amazonaws.firehose#FailureDescription" + } + }, + "traits": { + "smithy.api#documentation": "

    \n

    \n

    Amazon Data Firehose is in preview release and is subject to change.

    " + } + }, + "com.amazonaws.firehose#DatabaseSnapshotInfoList": { + "type": "list", + "member": { + "target": "com.amazonaws.firehose#DatabaseSnapshotInfo" + } + }, + "com.amazonaws.firehose#DatabaseSourceAuthenticationConfiguration": { + "type": "structure", + "members": { + "SecretsManagerConfiguration": { + "target": "com.amazonaws.firehose#SecretsManagerConfiguration", + "traits": { + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

    \n

    \n

    Amazon Data Firehose is in preview release and is subject to change.

    " + } + }, + "com.amazonaws.firehose#DatabaseSourceConfiguration": { + "type": "structure", + "members": { + "Type": { + "target": "com.amazonaws.firehose#DatabaseType", + "traits": { + "smithy.api#documentation": "

    \n

    \n

    Amazon Data Firehose is in preview release and is subject to change.

    ", + "smithy.api#required": {} + } + }, + "Endpoint": { + "target": "com.amazonaws.firehose#DatabaseEndpoint", + "traits": { + "smithy.api#documentation": "

    \n

    \n

    Amazon Data Firehose is in preview release and is subject to change.

    ", + "smithy.api#required": {} + } + }, + "Port": { + "target": "com.amazonaws.firehose#DatabasePort", + "traits": { + "smithy.api#documentation": "

    \n

    \n

    Amazon Data Firehose is in preview release and is subject to change.

    ", + "smithy.api#required": {} + } + }, + "SSLMode": { + "target": "com.amazonaws.firehose#SSLMode", + "traits": { + "smithy.api#documentation": "

    \n

    \n

    Amazon Data Firehose is in preview release and is subject to change.

    " + } + }, + "Databases": { + "target": "com.amazonaws.firehose#DatabaseList", + "traits": { + "smithy.api#documentation": "

    \n

    \n

    Amazon Data Firehose is in preview release and is subject to change.

    ", + "smithy.api#required": {} + } + }, + "Tables": { + "target": "com.amazonaws.firehose#DatabaseTableList", + "traits": { + "smithy.api#documentation": "

    \n

    \n

    Amazon Data Firehose is in preview release and is subject to change.

    ", + "smithy.api#required": {} + } + }, + "Columns": { + "target": "com.amazonaws.firehose#DatabaseColumnList", + "traits": { + "smithy.api#documentation": "

    \n

    \n

    Amazon Data Firehose is in preview release and is subject to change.

    " + } + }, + "SurrogateKeys": { + "target": "com.amazonaws.firehose#DatabaseSurrogateKeyList", + "traits": { + "smithy.api#documentation": "

    \n

    \n

    Amazon Data Firehose is in preview release and is subject to change.

    " + } + }, + "SnapshotWatermarkTable": { + "target": "com.amazonaws.firehose#DatabaseTableName", + "traits": { + "smithy.api#documentation": "

    \n

    \n

    Amazon Data Firehose is in preview release and is subject to change.

    ", + "smithy.api#required": {} + } + }, + "DatabaseSourceAuthenticationConfiguration": { + "target": "com.amazonaws.firehose#DatabaseSourceAuthenticationConfiguration", + "traits": { + "smithy.api#documentation": "

    \n

    \n

    Amazon Data Firehose is in preview release and is subject to change.

    ", + "smithy.api#required": {} + } + }, + "DatabaseSourceVPCConfiguration": { + "target": "com.amazonaws.firehose#DatabaseSourceVPCConfiguration", + "traits": { + "smithy.api#documentation": "

    \n

    \n

    Amazon Data Firehose is in preview release and is subject to change.

    ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

    \n

    \n

    Amazon Data Firehose is in preview release and is subject to change.
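The documentation strings for this preview structure are still empty, so the following fragment is only a best-effort sketch assembled from the required members declared above; every value is a placeholder and the shapes may change:

```ts
import type { DatabaseSourceConfiguration } from "@aws-sdk/client-firehose";

// Required members per the model: Type, Endpoint, Port, Databases, Tables,
// SnapshotWatermarkTable, DatabaseSourceAuthenticationConfiguration, and
// DatabaseSourceVPCConfiguration.
const databaseSource: DatabaseSourceConfiguration = {
  Type: "MySQL",
  Endpoint: "mydb.example.internal", // placeholder
  Port: 3306,
  SSLMode: "Enabled",
  Databases: { Include: ["inventory"] }, // placeholder database
  Tables: { Include: ["inventory.orders"] }, // placeholder table
  SnapshotWatermarkTable: "inventory.watermark", // placeholder
  DatabaseSourceAuthenticationConfiguration: {
    SecretsManagerConfiguration: {
      Enabled: true,
      SecretARN: "arn:aws:secretsmanager:us-east-1:123456789012:secret:db-creds", // placeholder
    },
  },
  DatabaseSourceVPCConfiguration: {
    VpcEndpointServiceName: "com.amazonaws.vpce.us-east-1.vpce-svc-0123456789abcdef0", // placeholder
  },
};
```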

    " + } + }, + "com.amazonaws.firehose#DatabaseSourceDescription": { + "type": "structure", + "members": { + "Type": { + "target": "com.amazonaws.firehose#DatabaseType", + "traits": { + "smithy.api#documentation": "

    \n

    \n

    Amazon Data Firehose is in preview release and is subject to change.

    " + } + }, + "Endpoint": { + "target": "com.amazonaws.firehose#DatabaseEndpoint", + "traits": { + "smithy.api#documentation": "

    \n

    \n

    Amazon Data Firehose is in preview release and is subject to change.

    " + } + }, + "Port": { + "target": "com.amazonaws.firehose#DatabasePort", + "traits": { + "smithy.api#documentation": "

    \n

    \n

    Amazon Data Firehose is in preview release and is subject to change.

    " + } + }, + "SSLMode": { + "target": "com.amazonaws.firehose#SSLMode", + "traits": { + "smithy.api#documentation": "

    \n

    \n

    Amazon Data Firehose is in preview release and is subject to change.

    " + } + }, + "Databases": { + "target": "com.amazonaws.firehose#DatabaseList", + "traits": { + "smithy.api#documentation": "

    \n

    \n

    Amazon Data Firehose is in preview release and is subject to change.

    " + } + }, + "Tables": { + "target": "com.amazonaws.firehose#DatabaseTableList", + "traits": { + "smithy.api#documentation": "

    \n

    \n

    Amazon Data Firehose is in preview release and is subject to change.

    " + } + }, + "Columns": { + "target": "com.amazonaws.firehose#DatabaseColumnList", + "traits": { + "smithy.api#documentation": "

    \n

    \n

    Amazon Data Firehose is in preview release and is subject to change.

    " + } + }, + "SurrogateKeys": { + "target": "com.amazonaws.firehose#DatabaseColumnIncludeOrExcludeList", + "traits": { + "smithy.api#documentation": "

    \n

    \n

    Amazon Data Firehose is in preview release and is subject to change.

    " + } + }, + "SnapshotWatermarkTable": { + "target": "com.amazonaws.firehose#DatabaseTableName", + "traits": { + "smithy.api#documentation": "

    \n

    \n

    Amazon Data Firehose is in preview release and is subject to change.

    " + } + }, + "SnapshotInfo": { + "target": "com.amazonaws.firehose#DatabaseSnapshotInfoList", + "traits": { + "smithy.api#documentation": "

    \n

    \n

    Amazon Data Firehose is in preview release and is subject to change.

    " + } + }, + "DatabaseSourceAuthenticationConfiguration": { + "target": "com.amazonaws.firehose#DatabaseSourceAuthenticationConfiguration", + "traits": { + "smithy.api#documentation": "

    \n

    \n

    Amazon Data Firehose is in preview release and is subject to change.

    " + } + }, + "DatabaseSourceVPCConfiguration": { + "target": "com.amazonaws.firehose#DatabaseSourceVPCConfiguration", + "traits": { + "smithy.api#documentation": "

    \n

    \n

    Amazon Data Firehose is in preview release and is subject to change.

    " + } + } + }, + "traits": { + "smithy.api#documentation": "

    \n

    \n

    Amazon Data Firehose is in preview release and is subject to change.

    " + } + }, + "com.amazonaws.firehose#DatabaseSourceVPCConfiguration": { + "type": "structure", + "members": { + "VpcEndpointServiceName": { + "target": "com.amazonaws.firehose#VpcEndpointServiceName", + "traits": { + "smithy.api#documentation": "

    \n

    \n

    Amazon Data Firehose is in preview release and is subject to change.

    ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

    \n

    \n

    Amazon Data Firehose is in preview release and is subject to change.

    " + } + }, + "com.amazonaws.firehose#DatabaseSurrogateKeyList": { + "type": "list", + "member": { + "target": "com.amazonaws.firehose#NonEmptyStringWithoutWhitespace" + } + }, + "com.amazonaws.firehose#DatabaseTableIncludeOrExcludeList": { + "type": "list", + "member": { + "target": "com.amazonaws.firehose#DatabaseTableName" + } + }, + "com.amazonaws.firehose#DatabaseTableList": { + "type": "structure", + "members": { + "Include": { + "target": "com.amazonaws.firehose#DatabaseTableIncludeOrExcludeList", + "traits": { + "smithy.api#documentation": "

    \n

    \n

    Amazon Data Firehose is in preview release and is subject to change.

    " + } + }, + "Exclude": { + "target": "com.amazonaws.firehose#DatabaseTableIncludeOrExcludeList", + "traits": { + "smithy.api#documentation": "

    \n

    \n

    Amazon Data Firehose is in preview release and is subject to change.

    " + } + } + }, + "traits": { + "smithy.api#documentation": "

    \n

    \n

    Amazon Data Firehose is in preview release and is subject to change.

    " + } + }, + "com.amazonaws.firehose#DatabaseTableName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 129 + }, + "smithy.api#pattern": "^[\\u0001-\\uFFFF]*$" + } + }, + "com.amazonaws.firehose#DatabaseType": { + "type": "enum", + "members": { + "MySQL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "MySQL" + } + }, + "PostgreSQL": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PostgreSQL" + } + } + } + }, "com.amazonaws.firehose#DefaultDocumentIdFormat": { "type": "enum", "members": { @@ -1180,7 +1586,7 @@ } ], "traits": { - "smithy.api#documentation": "

    Deletes a delivery stream and its data.

    \n

    You can delete a delivery stream only if it is in one of the following states:\n ACTIVE, DELETING, CREATING_FAILED, or\n DELETING_FAILED. You can't delete a delivery stream that is in the\n CREATING state. To check the state of a delivery stream, use DescribeDeliveryStream.

    \n

    DeleteDeliveryStream is an asynchronous API. When an API request to DeleteDeliveryStream succeeds, the delivery stream is marked for deletion, and it goes into the \n DELETING state.While the delivery stream is in the DELETING state, the service might\n continue to accept records, but it doesn't make any guarantees with respect to delivering\n the data. Therefore, as a best practice, first stop any applications that are sending\n records before you delete a delivery stream.

    \n

    Removal of a delivery stream that is in the DELETING state is a low priority operation for the service. A stream may remain in the \n DELETING state for several minutes. Therefore, as a best practice, applications should not wait for streams in the DELETING state \n to be removed.

    " + "smithy.api#documentation": "

    Deletes a Firehose stream and its data.

    \n

    You can delete a Firehose stream only if it is in one of the following states:\n ACTIVE, DELETING, CREATING_FAILED, or\n DELETING_FAILED. You can't delete a Firehose stream that is in the\n CREATING state. To check the state of a Firehose stream, use DescribeDeliveryStream.

    \n

DeleteDeliveryStream is an asynchronous API. When an API request to DeleteDeliveryStream succeeds, the Firehose stream is marked for deletion, and it goes into the \n DELETING state. While the Firehose stream is in the DELETING state, the service might\n continue to accept records, but it doesn't make any guarantees with respect to delivering\n the data. Therefore, as a best practice, first stop any applications that are sending\n records before you delete a Firehose stream.

    \n

    Removal of a Firehose stream that is in the DELETING state is a low priority operation for the service. A stream may remain in the \n DELETING state for several minutes. Therefore, as a best practice, applications should not wait for streams in the DELETING state \n to be removed.
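A sketch of the call (the stream name is a placeholder); per the notes above, stop producers first and don't block on the stream disappearing:

```ts
import { FirehoseClient, DeleteDeliveryStreamCommand } from "@aws-sdk/client-firehose";

const client = new FirehoseClient({});

// Deletion is asynchronous: the stream moves to DELETING immediately and is
// removed in the background.
await client.send(
  new DeleteDeliveryStreamCommand({
    DeliveryStreamName: "my-stream", // placeholder
    AllowForceDelete: false, // set true only to force past an unretirable KMS grant
  })
);
```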

    " } }, "com.amazonaws.firehose#DeleteDeliveryStreamInput": { @@ -1189,14 +1595,14 @@ "DeliveryStreamName": { "target": "com.amazonaws.firehose#DeliveryStreamName", "traits": { - "smithy.api#documentation": "

    The name of the delivery stream.

    ", + "smithy.api#documentation": "

    The name of the Firehose stream.

    ", "smithy.api#required": {} } }, "AllowForceDelete": { "target": "com.amazonaws.firehose#BooleanObject", "traits": { - "smithy.api#documentation": "

    Set this to true if you want to delete the delivery stream even if Firehose\n is unable to retire the grant for the CMK. Firehose might be unable to retire\n the grant due to a customer error, such as when the CMK or the grant are in an invalid\n state. If you force deletion, you can then use the RevokeGrant operation to\n revoke the grant you gave to Firehose. If a failure to retire the grant\n happens due to an Amazon Web Services KMS issue, Firehose keeps retrying the\n delete operation.

    \n

    The default value is false.

    " + "smithy.api#documentation": "

    Set this to true if you want to delete the Firehose stream even if Firehose\n is unable to retire the grant for the CMK. Firehose might be unable to retire\n the grant due to a customer error, such as when the CMK or the grant are in an invalid\n state. If you force deletion, you can then use the RevokeGrant operation to\n revoke the grant you gave to Firehose. If a failure to retire the grant\n happens due to an Amazon Web Services KMS issue, Firehose keeps retrying the\n delete operation.

    \n

    The default value is false.

    " } } }, @@ -1221,7 +1627,7 @@ "min": 1, "max": 512 }, - "smithy.api#pattern": "^arn:" + "smithy.api#pattern": "^arn:.*:firehose:[a-zA-Z0-9\\-]+:\\d{12}:deliverystream/[a-zA-Z0-9._-]+$" } }, "com.amazonaws.firehose#DeliveryStreamDescription": { @@ -1230,21 +1636,21 @@ "DeliveryStreamName": { "target": "com.amazonaws.firehose#DeliveryStreamName", "traits": { - "smithy.api#documentation": "

    The name of the delivery stream.

    ", + "smithy.api#documentation": "

    The name of the Firehose stream.

    ", "smithy.api#required": {} } }, "DeliveryStreamARN": { "target": "com.amazonaws.firehose#DeliveryStreamARN", "traits": { - "smithy.api#documentation": "

    The Amazon Resource Name (ARN) of the delivery stream. For more information, see\n Amazon\n Resource Names (ARNs) and Amazon Web Services Service Namespaces.

    ", + "smithy.api#documentation": "

    The Amazon Resource Name (ARN) of the Firehose stream. For more information, see\n Amazon\n Resource Names (ARNs) and Amazon Web Services Service Namespaces.

    ", "smithy.api#required": {} } }, "DeliveryStreamStatus": { "target": "com.amazonaws.firehose#DeliveryStreamStatus", "traits": { - "smithy.api#documentation": "

    The status of the delivery stream. If the status of a delivery stream is\n CREATING_FAILED, this status doesn't change, and you can't invoke\n CreateDeliveryStream again on it. However, you can invoke the DeleteDeliveryStream operation to delete it.

    ", + "smithy.api#documentation": "

    The status of the Firehose stream. If the status of a Firehose stream is\n CREATING_FAILED, this status doesn't change, and you can't invoke\n CreateDeliveryStream again on it. However, you can invoke the DeleteDeliveryStream operation to delete it.

    ", "smithy.api#required": {} } }, @@ -1257,33 +1663,33 @@ "DeliveryStreamEncryptionConfiguration": { "target": "com.amazonaws.firehose#DeliveryStreamEncryptionConfiguration", "traits": { - "smithy.api#documentation": "

    Indicates the server-side encryption (SSE) status for the delivery stream.

    " + "smithy.api#documentation": "

    Indicates the server-side encryption (SSE) status for the Firehose stream.

    " } }, "DeliveryStreamType": { "target": "com.amazonaws.firehose#DeliveryStreamType", "traits": { - "smithy.api#documentation": "

    The delivery stream type. This can be one of the following values:

    \n
      \n
    • \n

      \n DirectPut: Provider applications access the delivery stream\n directly.

      \n
    • \n
    • \n

      \n KinesisStreamAsSource: The delivery stream uses a Kinesis data\n stream as a source.

      \n
    • \n
    ", + "smithy.api#documentation": "

    The Firehose stream type. This can be one of the following values:

    \n
      \n
    • \n

      \n DirectPut: Provider applications access the Firehose stream\n directly.

      \n
    • \n
    • \n

      \n KinesisStreamAsSource: The Firehose stream uses a Kinesis data\n stream as a source.

      \n
    • \n
    ", "smithy.api#required": {} } }, "VersionId": { "target": "com.amazonaws.firehose#DeliveryStreamVersionId", "traits": { - "smithy.api#documentation": "

    Each time the destination is updated for a delivery stream, the version ID is\n changed, and the current version ID is required when updating the destination. This is so\n that the service knows it is applying the changes to the correct version of the delivery\n stream.

    ", + "smithy.api#documentation": "

Each time the destination is updated for a Firehose stream, the version ID is\n changed, and the current version ID is required when updating the destination. This is so\n that the service knows it is applying the changes to the correct version of the Firehose\n stream.

    ", "smithy.api#required": {} } }, "CreateTimestamp": { "target": "com.amazonaws.firehose#Timestamp", "traits": { - "smithy.api#documentation": "

    The date and time that the delivery stream was created.

    " + "smithy.api#documentation": "

    The date and time that the Firehose stream was created.

    " } }, "LastUpdateTimestamp": { "target": "com.amazonaws.firehose#Timestamp", "traits": { - "smithy.api#documentation": "

    The date and time that the delivery stream was last updated.

    " + "smithy.api#documentation": "

    The date and time that the Firehose stream was last updated.

    " } }, "Source": { @@ -1308,7 +1714,7 @@ } }, "traits": { - "smithy.api#documentation": "

    Contains information about a delivery stream.

    " + "smithy.api#documentation": "

    Contains information about a Firehose stream.

    " } }, "com.amazonaws.firehose#DeliveryStreamEncryptionConfiguration": { @@ -1329,7 +1735,7 @@ "Status": { "target": "com.amazonaws.firehose#DeliveryStreamEncryptionStatus", "traits": { - "smithy.api#documentation": "

-     "smithy.api#documentation": "This is the server-side encryption (SSE) status for the delivery stream. For a full description of the different values of this status, see StartDeliveryStreamEncryption and StopDeliveryStreamEncryption. If this status is ENABLING_FAILED or DISABLING_FAILED, it is the status of the most recent attempt to enable or disable SSE, respectively."
+     "smithy.api#documentation": "This is the server-side encryption (SSE) status for the Firehose stream. For a full description of the different values of this status, see StartDeliveryStreamEncryption and StopDeliveryStreamEncryption. If this status is ENABLING_FAILED or DISABLING_FAILED, it is the status of the most recent attempt to enable or disable SSE, respectively."
  } }, "FailureDescription": {
@@ -1355,7 +1761,7 @@
  "KeyType": { "target": "com.amazonaws.firehose#KeyType", "traits": {
-     "smithy.api#documentation": "Indicates the type of customer master key (CMK) to use for encryption. The default setting is Amazon Web Services_OWNED_CMK. For more information about CMKs, see Customer Master Keys (CMKs). When you invoke CreateDeliveryStream or StartDeliveryStreamEncryption with KeyType set to CUSTOMER_MANAGED_CMK, Firehose invokes the Amazon KMS operation CreateGrant to create a grant that allows the Firehose service to use the customer managed CMK to perform encryption and decryption. Firehose manages that grant. When you invoke StartDeliveryStreamEncryption to change the CMK for a delivery stream that is encrypted with a customer managed CMK, Firehose schedules the grant it had on the old CMK for retirement. You can use a CMK of type CUSTOMER_MANAGED_CMK to encrypt up to 500 delivery streams. If a CreateDeliveryStream or StartDeliveryStreamEncryption operation exceeds this limit, Firehose throws a LimitExceededException. To encrypt your delivery stream, use symmetric CMKs. Firehose doesn't support asymmetric CMKs. For information about symmetric and asymmetric CMKs, see About Symmetric and Asymmetric CMKs in the Amazon Web Services Key Management Service developer guide.",
+     "smithy.api#documentation": "Indicates the type of customer master key (CMK) to use for encryption. The default setting is Amazon Web Services_OWNED_CMK. For more information about CMKs, see Customer Master Keys (CMKs). When you invoke CreateDeliveryStream or StartDeliveryStreamEncryption with KeyType set to CUSTOMER_MANAGED_CMK, Firehose invokes the Amazon KMS operation CreateGrant to create a grant that allows the Firehose service to use the customer managed CMK to perform encryption and decryption. Firehose manages that grant. When you invoke StartDeliveryStreamEncryption to change the CMK for a Firehose stream that is encrypted with a customer managed CMK, Firehose schedules the grant it had on the old CMK for retirement. You can use a CMK of type CUSTOMER_MANAGED_CMK to encrypt up to 500 Firehose streams. If a CreateDeliveryStream or StartDeliveryStreamEncryption operation exceeds this limit, Firehose throws a LimitExceededException. To encrypt your Firehose stream, use symmetric CMKs. Firehose doesn't support asymmetric CMKs. For information about symmetric and asymmetric CMKs, see About Symmetric and Asymmetric CMKs in the Amazon Web Services Key Management Service developer guide.",
      "smithy.api#required": {}
  } }
@@ -1408,6 +1814,18 @@
  "com.amazonaws.firehose#DeliveryStreamFailureType": { "type": "enum", "members": {
+     "VPC_ENDPOINT_SERVICE_NAME_NOT_FOUND": { "target": "smithy.api#Unit", "traits": { "smithy.api#enumValue": "VPC_ENDPOINT_SERVICE_NAME_NOT_FOUND" } },
+     "VPC_INTERFACE_ENDPOINT_SERVICE_ACCESS_DENIED": { "target": "smithy.api#Unit", "traits": { "smithy.api#enumValue": "VPC_INTERFACE_ENDPOINT_SERVICE_ACCESS_DENIED" } },
  "RETIRE_KMS_GRANT_FAILED": { "target": "smithy.api#Unit", "traits": {
@@ -1571,6 +1989,12 @@
  "traits": { "smithy.api#enumValue": "MSKAsSource" } }
+     }, "DatabaseAsSource": { "target": "smithy.api#Unit", "traits": { "smithy.api#enumValue": "DatabaseAsSource" } }
  } } },

@@ -1598,7 +2022,7 @@
  } ], "traits": {
-     "smithy.api#documentation": "Describes the specified delivery stream and its status. For example, after your delivery stream is created, call DescribeDeliveryStream to see whether the delivery stream is ACTIVE and therefore ready for data to be sent to it. If the status of a delivery stream is CREATING_FAILED, this status doesn't change, and you can't invoke CreateDeliveryStream again on it. However, you can invoke the DeleteDeliveryStream operation to delete it. If the status is DELETING_FAILED, you can force deletion by invoking DeleteDeliveryStream again but with DeleteDeliveryStreamInput$AllowForceDelete set to true."
+     "smithy.api#documentation": "Describes the specified Firehose stream and its status. For example, after your Firehose stream is created, call DescribeDeliveryStream to see whether the Firehose stream is ACTIVE and therefore ready for data to be sent to it. If the status of a Firehose stream is CREATING_FAILED, this status doesn't change, and you can't invoke CreateDeliveryStream again on it. However, you can invoke the DeleteDeliveryStream operation to delete it. If the status is DELETING_FAILED, you can force deletion by invoking DeleteDeliveryStream again but with DeleteDeliveryStreamInput$AllowForceDelete set to true."
  } },

  "com.amazonaws.firehose#DescribeDeliveryStreamInput": {
@@ -1607,20 +2031,20 @@
  "DeliveryStreamName": { "target": "com.amazonaws.firehose#DeliveryStreamName", "traits": {
-     "smithy.api#documentation": "The name of the delivery stream.",
+     "smithy.api#documentation": "The name of the Firehose stream.",
      "smithy.api#required": {}
  } }, "Limit": { "target": "com.amazonaws.firehose#DescribeDeliveryStreamInputLimit", "traits": {
-     "smithy.api#documentation": "The limit on the number of destinations to return. You can have one destination per delivery stream."
+     "smithy.api#documentation": "The limit on the number of destinations to return. You can have one destination per Firehose stream."
  } }, "ExclusiveStartDestinationId": { "target": "com.amazonaws.firehose#DestinationId", "traits": {
-     "smithy.api#documentation": "The ID of the destination to start returning the destination information. Firehose supports one destination per delivery stream."
+     "smithy.api#documentation": "The ID of the destination to start returning the destination information. Firehose supports one destination per Firehose stream."
  } } },
@@ -1643,7 +2067,7 @@
  "DeliveryStreamDescription": { "target": "com.amazonaws.firehose#DeliveryStreamDescription", "traits": {
-     "smithy.api#documentation": "Information about the delivery stream.",
+     "smithy.api#documentation": "Information about the Firehose stream.",
      "smithy.api#required": {}
  } }

@@ -1739,12 +2163,12 @@
  "IcebergDestinationDescription": { "target": "com.amazonaws.firehose#IcebergDestinationDescription", "traits": {
-     "smithy.api#documentation": "Describes a destination in Apache Iceberg Tables. Amazon Data Firehose is in preview release and is subject to change."
+     "smithy.api#documentation": "Describes a destination in Apache Iceberg Tables."
  } } }, "traits": {
-     "smithy.api#documentation": "Describes the destination for a delivery stream."
+     "smithy.api#documentation": "Describes the destination for a Firehose stream."
  } }, "com.amazonaws.firehose#DestinationDescriptionList": {
@@ -1767,34 +2191,40 @@
  "type": "structure", "members": {
  "DestinationTableName": {
-     "target": "com.amazonaws.firehose#NonEmptyStringWithoutWhitespace",
+     "target": "com.amazonaws.firehose#StringWithLettersDigitsUnderscoresDots",
      "traits": {

-     "smithy.api#documentation": "Specifies the name of the Apache Iceberg Table. Amazon Data Firehose is in preview release and is subject to change.",
+     "smithy.api#documentation": "Specifies the name of the Apache Iceberg Table.",
      "smithy.api#required": {}
  } }, "DestinationDatabaseName": {
-     "target": "com.amazonaws.firehose#NonEmptyStringWithoutWhitespace",
+     "target": "com.amazonaws.firehose#StringWithLettersDigitsUnderscoresDots",
      "traits": {
-     "smithy.api#documentation": "The name of the Apache Iceberg database. Amazon Data Firehose is in preview release and is subject to change.",
+     "smithy.api#documentation": "The name of the Apache Iceberg database.",
      "smithy.api#required": {}
  } }, "UniqueKeys": { "target": "com.amazonaws.firehose#ListOfNonEmptyStringsWithoutWhitespace", "traits": {
-     "smithy.api#documentation": "A list of unique keys for a given Apache Iceberg table. Firehose will use these for running Create/Update/Delete operations on the given Iceberg table. Amazon Data Firehose is in preview release and is subject to change."
+     "smithy.api#documentation": "A list of unique keys for a given Apache Iceberg table. Firehose will use these for running Create, Update, or Delete operations on the given Iceberg table."
  } },
+     "PartitionSpec": { "target": "com.amazonaws.firehose#PartitionSpec", "traits": {
+     "smithy.api#documentation": "Amazon Data Firehose is in preview release and is subject to change."
+     } },
  "S3ErrorOutputPrefix": { "target": "com.amazonaws.firehose#ErrorOutputPrefix", "traits": {
-     "smithy.api#documentation": "The table specific S3 error output prefix. All the errors that occurred while delivering to this table will be prefixed with this value in S3 destination. Amazon Data Firehose is in preview release and is subject to change."
+     "smithy.api#documentation": "The table specific S3 error output prefix. All the errors that occurred while delivering to this table will be prefixed with this value in S3 destination."
  } } }, "traits": {
-     "smithy.api#documentation": "Describes the configuration of a destination in Apache Iceberg Tables. Amazon Data Firehose is in preview release and is subject to change."
+     "smithy.api#documentation": "Describes the configuration of a destination in Apache Iceberg Tables."
  } },

  "com.amazonaws.firehose#DestinationTableConfigurationList": {
@@ -1830,7 +2260,7 @@
  "Enabled": { "target": "com.amazonaws.firehose#BooleanObject", "traits": {
-     "smithy.api#documentation": "Specifies that the dynamic partitioning is enabled for this Firehose delivery stream."
+     "smithy.api#documentation": "Specifies that the dynamic partitioning is enabled for this Firehose stream."
  } } },
@@ -1850,7 +2280,7 @@
  "SizeInMBs": { "target": "com.amazonaws.firehose#ElasticsearchBufferingSizeInMBs", "traits": {
-     "smithy.api#documentation": "Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5. We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher."
+     "smithy.api#documentation": "Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5. We recommend setting this parameter to a value greater than the amount of data you typically ingest into the Firehose stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher."
  } } },

@@ -1942,7 +2372,7 @@
  "S3BackupMode": { "target": "com.amazonaws.firehose#ElasticsearchS3BackupMode", "traits": {
-     "smithy.api#documentation": "Defines how documents should be delivered to Amazon S3. When it is set to FailedDocumentsOnly, Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with AmazonOpenSearchService-failed/ appended to the key prefix. When set to AllDocuments, Firehose delivers all incoming records to Amazon S3, and also writes failed documents with AmazonOpenSearchService-failed/ appended to the prefix. For more information, see Amazon S3 Backup for the Amazon ES Destination. Default value is FailedDocumentsOnly. You can't change this backup mode after you create the delivery stream."
+     "smithy.api#documentation": "Defines how documents should be delivered to Amazon S3. When it is set to FailedDocumentsOnly, Firehose writes any documents that could not be indexed to the configured Amazon S3 destination, with AmazonOpenSearchService-failed/ appended to the key prefix. When set to AllDocuments, Firehose delivers all incoming records to Amazon S3, and also writes failed documents with AmazonOpenSearchService-failed/ appended to the prefix. For more information, see Amazon S3 Backup for the Amazon ES Destination. Default value is FailedDocumentsOnly. You can't change this backup mode after you create the Firehose stream."
  } }, "S3Configuration": {
@@ -1961,7 +2391,7 @@
  "CloudWatchLoggingOptions": { "target": "com.amazonaws.firehose#CloudWatchLoggingOptions", "traits": {
-     "smithy.api#documentation": "The Amazon CloudWatch logging options for your delivery stream."
+     "smithy.api#documentation": "The Amazon CloudWatch logging options for your Firehose stream."
  } }, "VpcConfiguration": {

@@ -2103,7 +2533,7 @@
  "TypeName": { "target": "com.amazonaws.firehose#ElasticsearchTypeName", "traits": {
-     "smithy.api#documentation": "The Elasticsearch type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Firehose returns an error during runtime. If you upgrade Elasticsearch from 6.x to 7.x and don’t update your delivery stream, Firehose still delivers data to Elasticsearch with the old index name and type name. If you want to update your delivery stream with a new index name, provide an empty string for TypeName."
+     "smithy.api#documentation": "The Elasticsearch type name. For Elasticsearch 6.x, there can be only one type per index. If you try to specify a new type for an existing index that already has another type, Firehose returns an error during runtime. If you upgrade Elasticsearch from 6.x to 7.x and don’t update your Firehose stream, Firehose still delivers data to Elasticsearch with the old index name and type name. If you want to update your Firehose stream with a new index name, provide an empty string for TypeName."
  } }, "IndexRotationPeriod": {
@@ -2139,7 +2569,7 @@
  "CloudWatchLoggingOptions": { "target": "com.amazonaws.firehose#CloudWatchLoggingOptions", "traits": {
-     "smithy.api#documentation": "The CloudWatch logging options for your delivery stream."
+     "smithy.api#documentation": "The CloudWatch logging options for your Firehose stream."
  } }, "DocumentIdOptions": {
@@ -2160,7 +2590,7 @@
      "min": 1, "max": 512 },
-     "smithy.api#pattern": "^arn:"
+     "smithy.api#pattern": "^arn:.*:es:[a-zA-Z0-9\\-]+:\\d{12}:domain/[a-z][-0-9a-z]{2,27}$"
  } }, "com.amazonaws.firehose#ElasticsearchIndexName": {
@@ -2344,7 +2774,7 @@
  "CloudWatchLoggingOptions": { "target": "com.amazonaws.firehose#CloudWatchLoggingOptions", "traits": {

-     "smithy.api#documentation": "The Amazon CloudWatch logging options for your delivery stream."
+     "smithy.api#documentation": "The Amazon CloudWatch logging options for your Firehose stream."
  } }, "ProcessingConfiguration": {
@@ -2356,7 +2786,7 @@
  "S3BackupMode": { "target": "com.amazonaws.firehose#S3BackupMode", "traits": {
-     "smithy.api#documentation": "The Amazon S3 backup mode. After you create a delivery stream, you can update it to enable Amazon S3 backup if it is disabled. If backup is enabled, you can't update the delivery stream to disable it."
+     "smithy.api#documentation": "The Amazon S3 backup mode. After you create a Firehose stream, you can update it to enable Amazon S3 backup if it is disabled. If backup is enabled, you can't update the Firehose stream to disable it."
  } }, "S3BackupConfiguration": {
@@ -2447,7 +2877,7 @@
  "CloudWatchLoggingOptions": { "target": "com.amazonaws.firehose#CloudWatchLoggingOptions", "traits": {
-     "smithy.api#documentation": "The Amazon CloudWatch logging options for your delivery stream."
+     "smithy.api#documentation": "The Amazon CloudWatch logging options for your Firehose stream."
  } }, "ProcessingConfiguration": {
@@ -2545,7 +2975,7 @@
  "CloudWatchLoggingOptions": { "target": "com.amazonaws.firehose#CloudWatchLoggingOptions", "traits": {
-     "smithy.api#documentation": "The Amazon CloudWatch logging options for your delivery stream."
+     "smithy.api#documentation": "The Amazon CloudWatch logging options for your Firehose stream."
  } }, "ProcessingConfiguration": {
@@ -2557,7 +2987,7 @@
  "S3BackupMode": { "target": "com.amazonaws.firehose#S3BackupMode", "traits": {
-     "smithy.api#documentation": "You can update a delivery stream to enable Amazon S3 backup if it is disabled. If backup is enabled, you can't update the delivery stream to disable it."
+     "smithy.api#documentation": "You can update a Firehose stream to enable Amazon S3 backup if it is disabled. If backup is enabled, you can't update the Firehose stream to disable it."
  } }, "S3BackupUpdate": {
@@ -3673,7 +4103,7 @@
      "min": 1, "max": 512 },
-     "smithy.api#pattern": "^arn:"
+     "smithy.api#pattern": "^arn:.*:glue:.*:\\d{12}:catalog$"
  } }, "com.amazonaws.firehose#HECAcknowledgmentTimeoutInSeconds": {

@@ -3775,7 +4205,7 @@
  "SizeInMBs": { "target": "com.amazonaws.firehose#HttpEndpointBufferingSizeInMBs", "traits": {
-     "smithy.api#documentation": "Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5. We recommend setting this parameter to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher."
+     "smithy.api#documentation": "Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5. We recommend setting this parameter to a value greater than the amount of data you typically ingest into the Firehose stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec, the value should be 10 MB or higher."
  } }, "IntervalInSeconds": {
@@ -4156,7 +4586,19 @@
  "DestinationTableConfigurationList": { "target": "com.amazonaws.firehose#DestinationTableConfigurationList", "traits": {
-     "smithy.api#documentation": "Provides a list of DestinationTableConfigurations which Firehose uses to deliver data to Apache Iceberg tables. Amazon Data Firehose is in preview release and is subject to change."
+     "smithy.api#documentation": "Provides a list of DestinationTableConfigurations which Firehose uses to deliver data to Apache Iceberg Tables. Firehose will write data with insert if table specific configuration is not provided here."
  } },
+     "SchemaEvolutionConfiguration": { "target": "com.amazonaws.firehose#SchemaEvolutionConfiguration", "traits": {
+     "smithy.api#documentation": "Amazon Data Firehose is in preview release and is subject to change."
+     } },
+     "TableCreationConfiguration": { "target": "com.amazonaws.firehose#TableCreationConfiguration", "traits": {
+     "smithy.api#documentation": "Amazon Data Firehose is in preview release and is subject to change."
+     } },
  "BufferingHints": {
@@ -4171,7 +4613,7 @@
  "S3BackupMode": { "target": "com.amazonaws.firehose#IcebergS3BackupMode", "traits": {
-     "smithy.api#documentation": "Describes how Firehose will backup records. Currently,Firehose only supports FailedDataOnly for preview. Amazon Data Firehose is in preview release and is subject to change."
+     "smithy.api#documentation": "Describes how Firehose will backup records. Currently, S3 backup only supports FailedDataOnly."
  } }, "RetryOptions": {
@@ -4180,14 +4622,14 @@
  "RoleARN": { "target": "com.amazonaws.firehose#RoleARN", "traits": {
-     "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Apache Iceberg tables role. Amazon Data Firehose is in preview release and is subject to change.",
+     "smithy.api#documentation": "The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling Apache Iceberg Tables.",
      "smithy.api#required": {}
  } }, "CatalogConfiguration": { "target": "com.amazonaws.firehose#CatalogConfiguration", "traits": {
-     "smithy.api#documentation": "Configuration describing where the destination Apache Iceberg Tables are persisted. Amazon Data Firehose is in preview release and is subject to change.",
+     "smithy.api#documentation": "Configuration describing where the destination Apache Iceberg Tables are persisted.",
      "smithy.api#required": {}
  } },
@@ -4199,7 +4641,7 @@
  } }, "traits": {
-     "smithy.api#documentation": "Specifies the destination configure settings for Apache Iceberg Table. Amazon Data Firehose is in preview release and is subject to change."
+     "smithy.api#documentation": "Specifies the destination configuration settings for an Apache Iceberg Table."
  } },

  "com.amazonaws.firehose#IcebergDestinationDescription": {
@@ -4208,7 +4650,19 @@
  "DestinationTableConfigurationList": { "target": "com.amazonaws.firehose#DestinationTableConfigurationList", "traits": {
-     "smithy.api#documentation": "Provides a list of DestinationTableConfigurations which Firehose uses to deliver data to Apache Iceberg tables. Amazon Data Firehose is in preview release and is subject to change."
+     "smithy.api#documentation": "Provides a list of DestinationTableConfigurations which Firehose uses to deliver data to Apache Iceberg Tables. Firehose will write data with insert if table specific configuration is not provided here."
  } },
+     "SchemaEvolutionConfiguration": { "target": "com.amazonaws.firehose#SchemaEvolutionConfiguration", "traits": {
+     "smithy.api#documentation": "Amazon Data Firehose is in preview release and is subject to change."
+     } },
+     "TableCreationConfiguration": { "target": "com.amazonaws.firehose#TableCreationConfiguration", "traits": {
+     "smithy.api#documentation": "Amazon Data Firehose is in preview release and is subject to change."
+     } },
  "BufferingHints": {
@@ -4223,7 +4677,7 @@
  "S3BackupMode": { "target": "com.amazonaws.firehose#IcebergS3BackupMode", "traits": {
-     "smithy.api#documentation": "Describes how Firehose will backup records. Currently,Firehose only supports FailedDataOnly for preview. Amazon Data Firehose is in preview release and is subject to change."
+     "smithy.api#documentation": "Describes how Firehose will backup records. Currently, Firehose only supports FailedDataOnly."
  } }, "RetryOptions": {
@@ -4232,13 +4686,13 @@
  "RoleARN": { "target": "com.amazonaws.firehose#RoleARN", "traits": {
-     "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Apache Iceberg Tables role. Amazon Data Firehose is in preview release and is subject to change."
+     "smithy.api#documentation": "The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling Apache Iceberg Tables."
  } }, "CatalogConfiguration": { "target": "com.amazonaws.firehose#CatalogConfiguration", "traits": {
-     "smithy.api#documentation": "Configuration describing where the destination Iceberg tables are persisted. Amazon Data Firehose is in preview release and is subject to change."
+     "smithy.api#documentation": "Configuration describing where the destination Iceberg tables are persisted."
  } }, "S3DestinationDescription": {
  } }, "traits": {
-     "smithy.api#documentation": "Describes a destination in Apache Iceberg Tables. Amazon Data Firehose is in preview release and is subject to change."
+     "smithy.api#documentation": "Describes a destination in Apache Iceberg Tables."
  } },

  "com.amazonaws.firehose#IcebergDestinationUpdate": {
@@ -4255,7 +4709,19 @@
  "DestinationTableConfigurationList": { "target": "com.amazonaws.firehose#DestinationTableConfigurationList", "traits": {
-     "smithy.api#documentation": "Provides a list of DestinationTableConfigurations which Firehose uses to deliver data to Apache Iceberg tables. Amazon Data Firehose is in preview release and is subject to change."
+     "smithy.api#documentation": "Provides a list of DestinationTableConfigurations which Firehose uses to deliver data to Apache Iceberg Tables. Firehose will write data with insert if table specific configuration is not provided here."
  } },
+     "SchemaEvolutionConfiguration": { "target": "com.amazonaws.firehose#SchemaEvolutionConfiguration", "traits": {
+     "smithy.api#documentation": "Amazon Data Firehose is in preview release and is subject to change."
+     } },
+     "TableCreationConfiguration": { "target": "com.amazonaws.firehose#TableCreationConfiguration", "traits": {
+     "smithy.api#documentation": "Amazon Data Firehose is in preview release and is subject to change."
+     } },
  "BufferingHints": {
@@ -4270,7 +4736,7 @@
  "S3BackupMode": { "target": "com.amazonaws.firehose#IcebergS3BackupMode", "traits": {
-     "smithy.api#documentation": "Describes how Firehose will backup records. Currently,Firehose only supports FailedDataOnly for preview. Amazon Data Firehose is in preview release and is subject to change."
+     "smithy.api#documentation": "Describes how Firehose will backup records. Currently, Firehose only supports FailedDataOnly."
  } }, "RetryOptions": {
@@ -4279,13 +4745,13 @@
  "RoleARN": { "target": "com.amazonaws.firehose#RoleARN", "traits": {
-     "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Apache Iceberg Tables role. Amazon Data Firehose is in preview release and is subject to change."
+     "smithy.api#documentation": "The Amazon Resource Name (ARN) of the IAM role to be assumed by Firehose for calling Apache Iceberg Tables."
  } }, "CatalogConfiguration": { "target": "com.amazonaws.firehose#CatalogConfiguration", "traits": {
-     "smithy.api#documentation": "Configuration describing where the destination Iceberg tables are persisted. Amazon Data Firehose is in preview release and is subject to change."
+     "smithy.api#documentation": "Configuration describing where the destination Iceberg tables are persisted."
  } }, "S3Configuration": {
@@ -4293,7 +4759,7 @@
  } }, "traits": {
-     "smithy.api#documentation": "Describes an update for a destination in Apache Iceberg Tables. Amazon Data Firehose is in preview release and is subject to change."
+     "smithy.api#documentation": "Describes an update for a destination in Apache Iceberg Tables."
  } },

  "com.amazonaws.firehose#IcebergS3BackupMode": {
@@ -4362,7 +4828,7 @@
  } }, "traits": {
-     "smithy.api#documentation": "Firehose throws this exception when an attempt to put records or to start or stop delivery stream encryption fails. This happens when the KMS service throws one of the following exception types: AccessDeniedException, InvalidStateException, DisabledException, or NotFoundException.",
+     "smithy.api#documentation": "Firehose throws this exception when an attempt to put records or to start or stop Firehose stream encryption fails. This happens when the KMS service throws one of the following exception types: AccessDeniedException, InvalidStateException, DisabledException, or NotFoundException.",
      "smithy.api#error": "client"
  } },
@@ -4420,7 +4886,7 @@
      "min": 1, "max": 512 },
-     "smithy.api#pattern": "^arn:"
+     "smithy.api#pattern": "^arn:.*:kinesis:[a-zA-Z0-9\\-]+:\\d{12}:stream/[a-zA-Z0-9_.-]+$"
  } }, "com.amazonaws.firehose#KinesisStreamSourceConfiguration": {
@@ -4442,7 +4908,7 @@
  } }, "traits": {
-     "smithy.api#documentation": "The stream and role Amazon Resource Names (ARNs) for a Kinesis data stream used as the source for a delivery stream."
+     "smithy.api#documentation": "The stream and role Amazon Resource Names (ARNs) for a Kinesis data stream used as the source for a Firehose stream."
  } }, "com.amazonaws.firehose#KinesisStreamSourceDescription": {
@@ -4468,7 +4934,7 @@
  } }, "traits": {
-     "smithy.api#documentation": "Details about a Kinesis data stream used as the source for a Firehose delivery stream."
+     "smithy.api#documentation": "Details about a Kinesis data stream used as the source for a Firehose stream."
  } },

  "com.amazonaws.firehose#LimitExceededException": {
@@ -4495,7 +4961,7 @@
  "target": "com.amazonaws.firehose#ListDeliveryStreamsOutput" }, "traits": {
-     "smithy.api#documentation": "Lists your delivery streams in alphabetical order of their names. The number of delivery streams might be too large to return using a single call to ListDeliveryStreams. You can limit the number of delivery streams returned, using the Limit parameter. To determine whether there are more delivery streams to list, check the value of HasMoreDeliveryStreams in the output. If there are more delivery streams to list, you can request them by calling this operation again and setting the ExclusiveStartDeliveryStreamName parameter to the name of the last delivery stream returned in the last call."
+     "smithy.api#documentation": "Lists your Firehose streams in alphabetical order of their names. The number of Firehose streams might be too large to return using a single call to ListDeliveryStreams. You can limit the number of Firehose streams returned, using the Limit parameter. To determine whether there are more delivery streams to list, check the value of HasMoreDeliveryStreams in the output. If there are more Firehose streams to list, you can request them by calling this operation again and setting the ExclusiveStartDeliveryStreamName parameter to the name of the last Firehose stream returned in the last call."
  } }, "com.amazonaws.firehose#ListDeliveryStreamsInput": {
@@ -4504,19 +4970,19 @@
  "Limit": { "target": "com.amazonaws.firehose#ListDeliveryStreamsInputLimit", "traits": {
-     "smithy.api#documentation": "The maximum number of delivery streams to list. The default value is 10."
+     "smithy.api#documentation": "The maximum number of Firehose streams to list. The default value is 10."
  } }, "DeliveryStreamType": { "target": "com.amazonaws.firehose#DeliveryStreamType", "traits": {
-     "smithy.api#documentation": "The delivery stream type. This can be one of the following values: • DirectPut: Provider applications access the delivery stream directly. • KinesisStreamAsSource: The delivery stream uses a Kinesis data stream as a source. This parameter is optional. If this parameter is omitted, delivery streams of all types are returned."
+     "smithy.api#documentation": "The Firehose stream type. This can be one of the following values: • DirectPut: Provider applications access the Firehose stream directly. • KinesisStreamAsSource: The Firehose stream uses a Kinesis data stream as a source. This parameter is optional. If this parameter is omitted, Firehose streams of all types are returned."
  } }, "ExclusiveStartDeliveryStreamName": { "target": "com.amazonaws.firehose#DeliveryStreamName", "traits": {
-     "smithy.api#documentation": "The list of delivery streams returned by this call to ListDeliveryStreams will start with the delivery stream whose name comes alphabetically immediately after the name you specify in ExclusiveStartDeliveryStreamName."
+     "smithy.api#documentation": "The list of Firehose streams returned by this call to ListDeliveryStreams will start with the Firehose stream whose name comes alphabetically immediately after the name you specify in ExclusiveStartDeliveryStreamName."
  } } },

@@ -4539,14 +5005,14 @@
  "DeliveryStreamNames": { "target": "com.amazonaws.firehose#DeliveryStreamNameList", "traits": {
-     "smithy.api#documentation": "The names of the delivery streams.",
+     "smithy.api#documentation": "The names of the Firehose streams.",
      "smithy.api#required": {}
  } }, "HasMoreDeliveryStreams": { "target": "com.amazonaws.firehose#BooleanObject", "traits": {
-     "smithy.api#documentation": "Indicates whether there are more delivery streams available to list.",
+     "smithy.api#documentation": "Indicates whether there are more Firehose streams available to list.",
      "smithy.api#required": {}
  } }

@@ -4587,7 +5053,7 @@
  } ], "traits": {
-     "smithy.api#documentation": "Lists the tags for the specified delivery stream. This operation has a limit of five transactions per second per account."
+     "smithy.api#documentation": "Lists the tags for the specified Firehose stream. This operation has a limit of five transactions per second per account."
  } },
@@ -4596,7 +5062,7 @@
  "DeliveryStreamName": { "target": "com.amazonaws.firehose#DeliveryStreamName", "traits": {
-     "smithy.api#documentation": "The name of the delivery stream whose tags you want to list.",
+     "smithy.api#documentation": "The name of the Firehose stream whose tags you want to list.",
      "smithy.api#required": {}
  } },
@@ -4609,7 +5075,7 @@
  "Limit": { "target": "com.amazonaws.firehose#ListTagsForDeliveryStreamInputLimit", "traits": {
-     "smithy.api#documentation": "The number of tags to return. If this number is less than the total number of tags associated with the delivery stream, HasMoreTags is set to true in the response. To list additional tags, set ExclusiveStartTagKey to the last key in the response."
+     "smithy.api#documentation": "The number of tags to return. If this number is less than the total number of tags associated with the Firehose stream, HasMoreTags is set to true in the response. To list additional tags, set ExclusiveStartTagKey to the last key in the response."
  } } },

@@ -4760,7 +5226,7 @@
  } }, "traits": {
-     "smithy.api#documentation": "Details about the Amazon MSK cluster used as the source for a Firehose delivery stream."
+     "smithy.api#documentation": "Details about the Amazon MSK cluster used as the source for a Firehose stream."
  } }, "com.amazonaws.firehose#NoEncryptionConfig": {
@@ -5058,6 +5524,41 @@
  } } },
+     "com.amazonaws.firehose#PartitionField": { "type": "structure", "members": {
+     "SourceName": { "target": "com.amazonaws.firehose#NonEmptyStringWithoutWhitespace", "traits": {
+     "smithy.api#documentation": "Amazon Data Firehose is in preview release and is subject to change.",
+     "smithy.api#required": {}
+     } } }, "traits": {
+     "smithy.api#documentation": "Amazon Data Firehose is in preview release and is subject to change."
+     } },
+     "com.amazonaws.firehose#PartitionFields": { "type": "list", "member": { "target": "com.amazonaws.firehose#PartitionField" } },
+     "com.amazonaws.firehose#PartitionSpec": { "type": "structure", "members": {
+     "Identity": { "target": "com.amazonaws.firehose#PartitionFields", "traits": {
+     "smithy.api#documentation": "Amazon Data Firehose is in preview release and is subject to change."
+     } } }, "traits": {
+     "smithy.api#documentation": "Amazon Data Firehose is in preview release and is subject to change."
+     } },
  "com.amazonaws.firehose#Password": {

@@ -5311,7 +5812,7 @@
  } ], "traits": {
-     "smithy.api#documentation": "Writes a single data record into an Amazon Firehose delivery stream. To write multiple data records into a delivery stream, use PutRecordBatch. Applications using these operations are referred to as producers. By default, each delivery stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each delivery stream. For more information about limits and how to request an increase, see Amazon Firehose Limits. Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a delivery stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics. You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KiB in size, and any kind of data. For example, it can be a segment from a log file, geographic location data, website clickstream data, and so on. Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination. The PutRecord operation returns a RecordId, which is a unique string assigned to each record. Producer applications can use this ID for purposes such as auditability and investigation. If the PutRecord operation throws a ServiceUnavailableException, the API is automatically reinvoked (retried) 3 times. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream. Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can result in data duplicates. For larger data assets, allow for a longer time out before retrying Put API operations. Data records sent to Firehose are stored for 24 hours from the time they are added to a delivery stream as it tries to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available. Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding."
+     "smithy.api#documentation": "Writes a single data record into a Firehose stream. To write multiple data records into a Firehose stream, use PutRecordBatch. Applications using these operations are referred to as producers. By default, each Firehose stream can take in up to 2,000 transactions per second, 5,000 records per second, or 5 MB per second. If you use PutRecord and PutRecordBatch, the limits are an aggregate across these two operations for each Firehose stream. For more information about limits and how to request an increase, see Amazon Firehose Limits. Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a Firehose stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics. You must specify the name of the Firehose stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KiB in size, and any kind of data. For example, it can be a segment from a log file, geographic location data, website clickstream data, and so on. For multi record de-aggregation, you cannot put more than 500 records even if the data blob length is less than 1000 KiB. If you include more than 500 records, the request succeeds but the record de-aggregation doesn't work as expected and the transformation lambda is invoked with the complete base64 encoded data blob instead of de-aggregated base64 decoded records. Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination. The PutRecord operation returns a RecordId, which is a unique string assigned to each record. Producer applications can use this ID for purposes such as auditability and investigation. If the PutRecord operation throws a ServiceUnavailableException, the API is automatically reinvoked (retried) 3 times. If the exception persists, it is possible that the throughput limits have been exceeded for the Firehose stream. Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can result in data duplicates. For larger data assets, allow for a longer time out before retrying Put API operations. Data records sent to Firehose are stored for 24 hours from the time they are added to a Firehose stream as it tries to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available. Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding."
  } },

  "com.amazonaws.firehose#PutRecordBatch": {
@@ -5340,7 +5841,7 @@
  } ], "traits": {
-     "smithy.api#documentation": "Writes multiple data records into a delivery stream in a single call, which can achieve higher throughput per producer than when writing single records. To write single data records into a delivery stream, use PutRecord. Applications using these operations are referred to as producers. Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a delivery stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics. For information about service quota, see Amazon Firehose Quota. Each PutRecordBatch request supports up to 500 records. Each record in the request can be as large as 1,000 KB (before base64 encoding), up to a limit of 4 MB for the entire request. These limits cannot be changed. You must specify the name of the delivery stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it could be a segment from a log file, geographic location data, website clickstream data, and so on. Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination. The PutRecordBatch response includes a count of failed records, FailedPutCount, and an array of responses, RequestResponses. Even if the PutRecordBatch call succeeds, the value of FailedPutCount may be greater than 0, indicating that there are records for which the operation didn't succeed. Each entry in the RequestResponses array provides additional information about the processed record. It directly correlates with a record in the request array using the same ordering, from the top to the bottom. The response array always includes the same number of records as the request array. RequestResponses includes both successfully and unsuccessfully processed records. Firehose tries to process all records in each PutRecordBatch request. A single record failure does not stop the processing of subsequent records. A successfully processed record includes a RecordId value, which is unique for the record. An unsuccessfully processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error, and is one of the following values: ServiceUnavailableException or InternalFailure. ErrorMessage provides more detailed information about the error. If there is an internal server error or a timeout, the write might have completed or it might have failed. If FailedPutCount is greater than 0, retry the request, resending only those records that might have failed processing. This minimizes the possible duplicate records and also reduces the total bytes sent (and corresponding charges). We recommend that you handle any duplicates at the destination. If PutRecordBatch throws ServiceUnavailableException, the API is automatically reinvoked (retried) 3 times. If the exception persists, it is possible that the throughput limits have been exceeded for the delivery stream. Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can result in data duplicates. For larger data assets, allow for a longer time out before retrying Put API operations. Data records sent to Firehose are stored for 24 hours from the time they are added to a delivery stream as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available. Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding."
+     "smithy.api#documentation": "Writes multiple data records into a Firehose stream in a single call, which can achieve higher throughput per producer than when writing single records. To write single data records into a Firehose stream, use PutRecord. Applications using these operations are referred to as producers. Firehose accumulates and publishes a particular metric for a customer account in one minute intervals. It is possible that the bursts of incoming bytes/records ingested to a Firehose stream last only for a few seconds. Due to this, the actual spikes in the traffic might not be fully visible in the customer's 1 minute CloudWatch metrics. For information about service quota, see Amazon Firehose Quota. Each PutRecordBatch request supports up to 500 records. Each record in the request can be as large as 1,000 KB (before base64 encoding), up to a limit of 4 MB for the entire request. These limits cannot be changed. You must specify the name of the Firehose stream and the data record when using PutRecord. The data record consists of a data blob that can be up to 1,000 KB in size, and any kind of data. For example, it could be a segment from a log file, geographic location data, website clickstream data, and so on. For multi record de-aggregation, you cannot put more than 500 records even if the data blob length is less than 1000 KiB. If you include more than 500 records, the request succeeds but the record de-aggregation doesn't work as expected and the transformation lambda is invoked with the complete base64 encoded data blob instead of de-aggregated base64 decoded records. Firehose buffers records before delivering them to the destination. To disambiguate the data blobs at the destination, a common solution is to use delimiters in the data, such as a newline (\\n) or some other character unique within the data. This allows the consumer application to parse individual data items when reading the data from the destination. The PutRecordBatch response includes a count of failed records, FailedPutCount, and an array of responses, RequestResponses. Even if the PutRecordBatch call succeeds, the value of FailedPutCount may be greater than 0, indicating that there are records for which the operation didn't succeed. Each entry in the RequestResponses array provides additional information about the processed record. It directly correlates with a record in the request array using the same ordering, from the top to the bottom. The response array always includes the same number of records as the request array. RequestResponses includes both successfully and unsuccessfully processed records. Firehose tries to process all records in each PutRecordBatch request. A single record failure does not stop the processing of subsequent records. A successfully processed record includes a RecordId value, which is unique for the record. An unsuccessfully processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error, and is one of the following values: ServiceUnavailableException or InternalFailure. ErrorMessage provides more detailed information about the error. If there is an internal server error or a timeout, the write might have completed or it might have failed. If FailedPutCount is greater than 0, retry the request, resending only those records that might have failed processing. This minimizes the possible duplicate records and also reduces the total bytes sent (and corresponding charges). We recommend that you handle any duplicates at the destination. If PutRecordBatch throws ServiceUnavailableException, the API is automatically reinvoked (retried) 3 times. If the exception persists, it is possible that the throughput limits have been exceeded for the Firehose stream. Re-invoking the Put API operations (for example, PutRecord and PutRecordBatch) can result in data duplicates. For larger data assets, allow for a longer time out before retrying Put API operations. Data records sent to Firehose are stored for 24 hours from the time they are added to a Firehose stream as it attempts to send the records to the destination. If the destination is unreachable for more than 24 hours, the data is no longer available. Don't concatenate two or more base64 strings to form the data fields of your records. Instead, concatenate the raw data, then perform base64 encoding."
  } },

  "com.amazonaws.firehose#PutRecordBatchInput": {
@@ -5349,7 +5850,7 @@
  "DeliveryStreamName": { "target": "com.amazonaws.firehose#DeliveryStreamName", "traits": {
-     "smithy.api#documentation": "The name of the delivery stream.",
+     "smithy.api#documentation": "The name of the Firehose stream.",
      "smithy.api#required": {}
  } },
@@ -5428,7 +5929,7 @@
  } }, "traits": {
-     "smithy.api#documentation": "Contains the result for an individual record from a PutRecordBatch request. If the record is successfully added to your delivery stream, it receives a record ID. If the record fails to be added to your delivery stream, the result includes an error code and an error message."
+     "smithy.api#documentation": "Contains the result for an individual record from a PutRecordBatch request. If the record is successfully added to your Firehose stream, it receives a record ID. If the record fails to be added to your Firehose stream, the result includes an error code and an error message."
  } }, "com.amazonaws.firehose#PutRecordBatchResponseEntryList": {
@@ -5449,7 +5950,7 @@
  "DeliveryStreamName": { "target": "com.amazonaws.firehose#DeliveryStreamName", "traits": {
-     "smithy.api#documentation": "The name of the delivery stream.",
+     "smithy.api#documentation": "The name of the Firehose stream.",
      "smithy.api#required": {}
  } },
@@ -5509,7 +6010,7 @@
  } }, "traits": {
-     "smithy.api#documentation": "The unit of data in a delivery stream."
+     "smithy.api#documentation": "The unit of data in a Firehose stream."
  } },

  "com.amazonaws.firehose#RedshiftDestinationConfiguration": {
@@ -5570,7 +6071,7 @@
  "S3BackupMode": { "target": "com.amazonaws.firehose#RedshiftS3BackupMode", "traits": {
-     "smithy.api#documentation": "The Amazon S3 backup mode. After you create a delivery stream, you can update it to enable Amazon S3 backup if it is disabled. If backup is enabled, you can't update the delivery stream to disable it."
+     "smithy.api#documentation": "The Amazon S3 backup mode. After you create a Firehose stream, you can update it to enable Amazon S3 backup if it is disabled. If backup is enabled, you can't update the Firehose stream to disable it."
  } }, "S3BackupConfiguration": {
@@ -5582,7 +6083,7 @@
  "CloudWatchLoggingOptions": { "target": "com.amazonaws.firehose#CloudWatchLoggingOptions", "traits": {
-     "smithy.api#documentation": "The CloudWatch logging options for your delivery stream."
+     "smithy.api#documentation": "The CloudWatch logging options for your Firehose stream."
  } }, "SecretsManagerConfiguration": {
@@ -5660,7 +6161,7 @@
  "CloudWatchLoggingOptions": { "target": "com.amazonaws.firehose#CloudWatchLoggingOptions", "traits": {
-     "smithy.api#documentation": "The Amazon CloudWatch logging options for your delivery stream."
+     "smithy.api#documentation": "The Amazon CloudWatch logging options for your Firehose stream."
  } }, "SecretsManagerConfiguration": {
@@ -5728,7 +6229,7 @@
  "S3BackupMode": { "target": "com.amazonaws.firehose#RedshiftS3BackupMode", "traits": {
-     "smithy.api#documentation": "You can update a delivery stream to enable Amazon S3 backup if it is disabled. If backup is enabled, you can't update the delivery stream to disable it."
+     "smithy.api#documentation": "You can update a Firehose stream to enable Amazon S3 backup if it is disabled. If backup is enabled, you can't update the Firehose stream to disable it."
  } }, "S3BackupUpdate": {
@@ -5740,7 +6241,7 @@
  "CloudWatchLoggingOptions": { "target": "com.amazonaws.firehose#CloudWatchLoggingOptions", "traits": {
-     "smithy.api#documentation": "The Amazon CloudWatch logging options for your delivery stream."
+     "smithy.api#documentation": "The Amazon CloudWatch logging options for your Firehose stream."
  } }, "SecretsManagerConfiguration": {
@@ -5839,12 +6340,12 @@
  "DurationInSeconds": { "target": "com.amazonaws.firehose#RetryDurationInSeconds", "traits": {
-     "smithy.api#documentation": "The period of time during which Firehose retries to deliver data to the specified Amazon S3 prefix."
+     "smithy.api#documentation": "The period of time during which Firehose retries to deliver data to the specified destination."
  } } }, "traits": {
-     "smithy.api#documentation": "The retry behavior in case Firehose is unable to deliver data to an Amazon S3 prefix."
+     "smithy.api#documentation": "The retry behavior in case Firehose is unable to deliver data to a destination."
  } }, "com.amazonaws.firehose#RoleARN": {
@@ -5854,7 +6355,7 @@
      "min": 1, "max": 512 },
-     "smithy.api#pattern": "^arn:"
+     "smithy.api#pattern": "^arn:.*:iam::\\d{12}:role/[a-zA-Z_0-9+=,.@\\-_/]+$"
  } }, "com.amazonaws.firehose#S3BackupMode": {

@@ -5924,7 +6425,7 @@
  "CloudWatchLoggingOptions": { "target": "com.amazonaws.firehose#CloudWatchLoggingOptions", "traits": {
-     "smithy.api#documentation": "The CloudWatch logging options for your delivery stream."
+     "smithy.api#documentation": "The CloudWatch logging options for your Firehose stream."
  } } },
@@ -5985,7 +6486,7 @@
  "CloudWatchLoggingOptions": { "target": "com.amazonaws.firehose#CloudWatchLoggingOptions", "traits": {
-     "smithy.api#documentation": "The Amazon CloudWatch logging options for your delivery stream."
+     "smithy.api#documentation": "The Amazon CloudWatch logging options for your Firehose stream."
  } } },
@@ -6041,7 +6542,7 @@
  "CloudWatchLoggingOptions": { "target": "com.amazonaws.firehose#CloudWatchLoggingOptions", "traits": {
-     "smithy.api#documentation": "The CloudWatch logging options for your delivery stream."
+     "smithy.api#documentation": "The CloudWatch logging options for your Firehose stream."
  } } },
@@ -6049,6 +6550,23 @@
      "smithy.api#documentation": "Describes an update for a destination in Amazon S3."
  } },
+     "com.amazonaws.firehose#SSLMode": { "type": "enum", "members": {
+     "Disabled": { "target": "smithy.api#Unit", "traits": { "smithy.api#enumValue": "Disabled" } },
+     "Enabled": { "target": "smithy.api#Unit", "traits": { "smithy.api#enumValue": "Enabled" } }
+     } },
  "com.amazonaws.firehose#SchemaConfiguration": { "type": "structure", "members": {
@@ -6093,6 +6611,21 @@

    Specifies the schema to which you want Firehose to configure your data\n before it writes it to Amazon S3. This parameter is required if Enabled is set\n to true.
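`SchemaConfiguration` is the piece of `DataFormatConversionConfiguration` that points Firehose at a Glue table whose schema drives Parquet/ORC conversion. A hedged sketch of the shape as the TypeScript client exposes it; all names are illustrative and the Glue database and table are assumed to exist:

```ts
import type { SchemaConfiguration } from "@aws-sdk/client-firehose";

// Glue table whose schema governs record-format conversion before S3 writes.
const schemaConfiguration: SchemaConfiguration = {
  RoleARN: "arn:aws:iam::123456789012:role/firehose-delivery-role", // illustrative
  DatabaseName: "example_glue_db",                                  // illustrative
  TableName: "example_glue_table",                                  // illustrative
  Region: "us-east-1",
  VersionId: "LATEST", // track the latest table version
};
```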

    " } }, + "com.amazonaws.firehose#SchemaEvolutionConfiguration": { + "type": "structure", + "members": { + "Enabled": { + "target": "com.amazonaws.firehose#BooleanObject", + "traits": { + "smithy.api#documentation": "

    \n

    \n

    Amazon Data Firehose is in preview release and is subject to change.

    ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

    \n

    \n

    Amazon Data Firehose is in preview release and is subject to change.

    " + } + }, "com.amazonaws.firehose#SecretARN": { "type": "string", "traits": { @@ -6100,7 +6633,7 @@ "min": 1, "max": 2048 }, - "smithy.api#pattern": "^arn:" + "smithy.api#pattern": "^arn:.*:secretsmanager:[a-zA-Z0-9\\-]+:\\d{12}:secret:[a-zA-Z0-9\\-/_+=.@]+$" } }, "com.amazonaws.firehose#SecretsManagerConfiguration": { @@ -6109,7 +6642,7 @@ "SecretARN": { "target": "com.amazonaws.firehose#SecretARN", "traits": { - "smithy.api#documentation": "

-          "smithy.api#documentation": "The ARN of the secret that stores your credentials. It must be in the same region as the\n Firehose stream and the role. The secret ARN can reside in a different account than the delivery stream and role as Firehose supports cross-account secret access. This parameter is required when Enabled is set to True."
+          "smithy.api#documentation": "The ARN of the secret that stores your credentials. It must be in the same region as the\n Firehose stream and the role. The secret ARN can reside in a different account than the Firehose stream and role as Firehose supports cross-account secret access. This parameter is required when Enabled is set to True."
        }
      },
      "RoleARN": {
@@ -6121,7 +6654,7 @@
      "Enabled": {
        "target": "com.amazonaws.firehose#BooleanObject",
        "traits": {
-          "smithy.api#documentation": "Specifies whether you want to use the the secrets manager feature. When set as\n True the secrets manager configuration overwrites the existing secrets in\n the destination configuration. When it's set to False Firehose falls back to\n the credentials in the destination configuration.",
+          "smithy.api#documentation": "Specifies whether you want to use the secrets manager feature. When set as\n True the secrets manager configuration overwrites the existing secrets in\n the destination configuration. When it's set to False Firehose falls back to\n the credentials in the destination configuration.",
          "smithy.api#required": {}
        }
      }
    },
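Taken together, `SecretsManagerConfiguration` is a small switchable credential source. A minimal sketch with illustrative ARNs; per the documentation above, the secret may live in another account as long as the role can read it:

```ts
import type { SecretsManagerConfiguration } from "@aws-sdk/client-firehose";

// With Enabled: true the secret overrides any credentials embedded in the
// destination configuration; with false, Firehose falls back to those.
const secretsManagerConfiguration: SecretsManagerConfiguration = {
  Enabled: true,
  SecretARN:
    "arn:aws:secretsmanager:us-east-1:123456789012:secret:firehose/redshift-AbC123", // illustrative
  RoleARN: "arn:aws:iam::123456789012:role/firehose-secret-access",                  // illustrative
};
```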

    ", "smithy.api#required": {} } } @@ -6173,7 +6706,7 @@ } }, "traits": { - "smithy.api#documentation": "

    The service is unavailable. Back off and retry the operation. If you continue to see\n the exception, throughput limits for the delivery stream may have been exceeded. For more\n information about limits and how to request an increase, see Amazon Firehose\n Limits.

    ", + "smithy.api#documentation": "

    The service is unavailable. Back off and retry the operation. If you continue to see\n the exception, throughput limits for the Firehose stream may have been exceeded. For more\n information about limits and how to request an increase, see Amazon Firehose\n Limits.
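Since the documentation explicitly asks callers to back off and retry, here is a minimal exponential-backoff sketch around `PutRecordBatch`; production code would also re-send the individual records the response marks as failed:

```ts
import {
  FirehoseClient,
  PutRecordBatchCommand,
  ServiceUnavailableException,
} from "@aws-sdk/client-firehose";

const client = new FirehoseClient({});

async function sendWithBackoff(streamName: string, payloads: Uint8Array[]) {
  for (let attempt = 0; attempt < 5; attempt++) {
    try {
      return await client.send(
        new PutRecordBatchCommand({
          DeliveryStreamName: streamName,
          Records: payloads.map((Data) => ({ Data })),
        })
      );
    } catch (err) {
      if (!(err instanceof ServiceUnavailableException)) throw err;
      // Wait 100 ms, 200 ms, 400 ms, ... before the next attempt.
      await new Promise((resolve) => setTimeout(resolve, 2 ** attempt * 100));
    }
  }
  throw new Error("PutRecordBatch still unavailable after 5 attempts");
}
```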

    ", "smithy.api#error": "server", "smithy.api#httpError": 503 } @@ -6187,6 +6720,46 @@ } } }, + "com.amazonaws.firehose#SnapshotRequestedBy": { + "type": "enum", + "members": { + "USER": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "USER" + } + }, + "FIREHOSE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FIREHOSE" + } + } + } + }, + "com.amazonaws.firehose#SnapshotStatus": { + "type": "enum", + "members": { + "IN_PROGRESS": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "IN_PROGRESS" + } + }, + "COMPLETE": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "COMPLETE" + } + }, + "SUSPENDED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SUSPENDED" + } + } + } + }, "com.amazonaws.firehose#SnowflakeAccountUrl": { "type": "string", "traits": { @@ -6204,7 +6777,7 @@ "SizeInMBs": { "target": "com.amazonaws.firehose#SnowflakeBufferingSizeInMBs", "traits": { - "smithy.api#documentation": "

    \n Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 1.\n

    " + "smithy.api#documentation": "

    Buffer incoming data to the specified size, in MBs, before delivering it to the\n destination. The default value is 128.
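The hunk above corrects the documented default for the Snowflake buffer size from 1 MiB to 128 MiB. A sketch of setting the hints explicitly rather than relying on defaults (the values are arbitrary):

```ts
import type { SnowflakeBufferingHints } from "@aws-sdk/client-firehose";

// Larger SizeInMBs means fewer, bigger writes to Snowflake at the cost of
// added delivery latency; IntervalInSeconds flushes on a timer as well.
const snowflakeBufferingHints: SnowflakeBufferingHints = {
  SizeInMBs: 128,
  IntervalInSeconds: 60,
};
```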

    " } }, "IntervalInSeconds": { @@ -6603,7 +7176,7 @@ "S3BackupMode": { "target": "com.amazonaws.firehose#SnowflakeS3BackupMode", "traits": { - "smithy.api#documentation": "

    Choose an S3 backup mode

    " + "smithy.api#documentation": "

    Choose an S3 backup mode. Once you set the mode as AllData, you can not\n change it to FailedDataOnly.

    " } }, "S3Update": { @@ -6797,10 +7370,16 @@ "traits": { "smithy.api#documentation": "

    The configuration description for the Amazon MSK cluster to be used as the source for a delivery\n stream.

    " } + }, + "DatabaseSourceDescription": { + "target": "com.amazonaws.firehose#DatabaseSourceDescription", + "traits": { + "smithy.api#documentation": "

    \n

    \n

    Amazon Data Firehose is in preview release and is subject to change.

    " + } } }, "traits": { - "smithy.api#documentation": "

    Details about a Kinesis data stream used as the source for a Firehose\n delivery stream.

    " + "smithy.api#documentation": "

    Details about a Kinesis data stream used as the source for a Firehose\n Firehose stream.

    " } }, "com.amazonaws.firehose#SplunkBufferingHints": { @@ -6898,7 +7477,7 @@ "CloudWatchLoggingOptions": { "target": "com.amazonaws.firehose#CloudWatchLoggingOptions", "traits": { - "smithy.api#documentation": "

    The Amazon CloudWatch logging options for your delivery stream.

    " + "smithy.api#documentation": "

    The Amazon CloudWatch logging options for your Firehose stream.

    " } }, "BufferingHints": { @@ -6972,7 +7551,7 @@ "CloudWatchLoggingOptions": { "target": "com.amazonaws.firehose#CloudWatchLoggingOptions", "traits": { - "smithy.api#documentation": "

    The Amazon CloudWatch logging options for your delivery stream.

    " + "smithy.api#documentation": "

    The Amazon CloudWatch logging options for your Firehose stream.

    " } }, "BufferingHints": { @@ -7046,7 +7625,7 @@ "CloudWatchLoggingOptions": { "target": "com.amazonaws.firehose#CloudWatchLoggingOptions", "traits": { - "smithy.api#documentation": "

    The Amazon CloudWatch logging options for your delivery stream.

    " + "smithy.api#documentation": "

    The Amazon CloudWatch logging options for your Firehose stream.

    " } }, "BufferingHints": { @@ -7132,7 +7711,7 @@ } ], "traits": { - "smithy.api#documentation": "

    Enables server-side encryption (SSE) for the delivery stream.

    \n

    This operation is asynchronous. It returns immediately. When you invoke it, Firehose first sets the encryption status of the stream to ENABLING, and then\n to ENABLED. The encryption status of a delivery stream is the\n Status property in DeliveryStreamEncryptionConfiguration.\n If the operation fails, the encryption status changes to ENABLING_FAILED. You\n can continue to read and write data to your delivery stream while the encryption status is\n ENABLING, but the data is not encrypted. It can take up to 5 seconds after\n the encryption status changes to ENABLED before all records written to the\n delivery stream are encrypted. To find out whether a record or a batch of records was\n encrypted, check the response elements PutRecordOutput$Encrypted and\n PutRecordBatchOutput$Encrypted, respectively.

    \n

    To check the encryption status of a delivery stream, use DescribeDeliveryStream.

    \n

    Even if encryption is currently enabled for a delivery stream, you can still invoke this\n operation on it to change the ARN of the CMK or both its type and ARN. If you invoke this\n method to change the CMK, and the old CMK is of type CUSTOMER_MANAGED_CMK,\n Firehose schedules the grant it had on the old CMK for retirement. If the new\n CMK is of type CUSTOMER_MANAGED_CMK, Firehose creates a grant\n that enables it to use the new CMK to encrypt and decrypt data and to manage the\n grant.

    \n

    For the KMS grant creation to be successful, the Firehose API operations\n StartDeliveryStreamEncryption and CreateDeliveryStream should\n not be called with session credentials that are more than 6 hours old.

    \n

    If a delivery stream already has encryption enabled and then you invoke this operation\n to change the ARN of the CMK or both its type and ARN and you get\n ENABLING_FAILED, this only means that the attempt to change the CMK failed.\n In this case, encryption remains enabled with the old CMK.

    \n

    If the encryption status of your delivery stream is ENABLING_FAILED, you\n can invoke this operation again with a valid CMK. The CMK must be enabled and the key\n policy mustn't explicitly deny the permission for Firehose to invoke KMS\n encrypt and decrypt operations.

    \n

    You can enable SSE for a delivery stream only if it's a delivery stream that uses\n DirectPut as its source.

    \n

    The StartDeliveryStreamEncryption and\n StopDeliveryStreamEncryption operations have a combined limit of 25 calls\n per delivery stream per 24 hours. For example, you reach the limit if you call\n StartDeliveryStreamEncryption 13 times and\n StopDeliveryStreamEncryption 12 times for the same delivery stream in a\n 24-hour period.

    " + "smithy.api#documentation": "

    Enables server-side encryption (SSE) for the Firehose stream.

    \n

    This operation is asynchronous. It returns immediately. When you invoke it, Firehose first sets the encryption status of the stream to ENABLING, and then\n to ENABLED. The encryption status of a Firehose stream is the\n Status property in DeliveryStreamEncryptionConfiguration.\n If the operation fails, the encryption status changes to ENABLING_FAILED. You\n can continue to read and write data to your Firehose stream while the encryption status is\n ENABLING, but the data is not encrypted. It can take up to 5 seconds after\n the encryption status changes to ENABLED before all records written to the\n Firehose stream are encrypted. To find out whether a record or a batch of records was\n encrypted, check the response elements PutRecordOutput$Encrypted and\n PutRecordBatchOutput$Encrypted, respectively.

    \n

    To check the encryption status of a Firehose stream, use DescribeDeliveryStream.

    \n

    Even if encryption is currently enabled for a Firehose stream, you can still invoke this\n operation on it to change the ARN of the CMK or both its type and ARN. If you invoke this\n method to change the CMK, and the old CMK is of type CUSTOMER_MANAGED_CMK,\n Firehose schedules the grant it had on the old CMK for retirement. If the new\n CMK is of type CUSTOMER_MANAGED_CMK, Firehose creates a grant\n that enables it to use the new CMK to encrypt and decrypt data and to manage the\n grant.

    \n

    For the KMS grant creation to be successful, the Firehose API operations\n StartDeliveryStreamEncryption and CreateDeliveryStream should\n not be called with session credentials that are more than 6 hours old.

    \n

    If a Firehose stream already has encryption enabled and then you invoke this operation\n to change the ARN of the CMK or both its type and ARN and you get\n ENABLING_FAILED, this only means that the attempt to change the CMK failed.\n In this case, encryption remains enabled with the old CMK.

    \n

    If the encryption status of your Firehose stream is ENABLING_FAILED, you\n can invoke this operation again with a valid CMK. The CMK must be enabled and the key\n policy mustn't explicitly deny the permission for Firehose to invoke KMS\n encrypt and decrypt operations.

    \n

    You can enable SSE for a Firehose stream only if it's a Firehose stream that uses\n DirectPut as its source.

    \n

    The StartDeliveryStreamEncryption and\n StopDeliveryStreamEncryption operations have a combined limit of 25 calls\n per Firehose stream per 24 hours. For example, you reach the limit if you call\n StartDeliveryStreamEncryption 13 times and\n StopDeliveryStreamEncryption 12 times for the same Firehose stream in a\n 24-hour period.
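The operation documentation above is renamed wholesale; the behavior it describes is unchanged. A sketch of the enable-then-poll flow it spells out, using the generated commands (stream name and key ARN are illustrative):

```ts
import {
  DescribeDeliveryStreamCommand,
  FirehoseClient,
  StartDeliveryStreamEncryptionCommand,
} from "@aws-sdk/client-firehose";

const client = new FirehoseClient({});

// Kick off SSE with a customer managed key. The call returns immediately;
// the stream reports ENABLING and later ENABLED (or ENABLING_FAILED).
await client.send(
  new StartDeliveryStreamEncryptionCommand({
    DeliveryStreamName: "example-stream", // illustrative
    DeliveryStreamEncryptionConfigurationInput: {
      KeyType: "CUSTOMER_MANAGED_CMK",
      KeyARN:
        "arn:aws:kms:us-east-1:123456789012:key/1234abcd-12ab-34cd-56ef-1234567890ab", // illustrative
    },
  })
);

// Poll the Status property that the documentation above calls out.
const { DeliveryStreamDescription } = await client.send(
  new DescribeDeliveryStreamCommand({ DeliveryStreamName: "example-stream" })
);
console.log(
  DeliveryStreamDescription?.DeliveryStreamEncryptionConfiguration?.Status
);
```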

    " } }, "com.amazonaws.firehose#StartDeliveryStreamEncryptionInput": { @@ -7141,7 +7720,7 @@ "DeliveryStreamName": { "target": "com.amazonaws.firehose#DeliveryStreamName", "traits": { - "smithy.api#documentation": "

    The name of the delivery stream for which you want to enable server-side encryption\n (SSE).

    ", + "smithy.api#documentation": "

    The name of the Firehose stream for which you want to enable server-side encryption\n (SSE).

    ", "smithy.api#required": {} } }, @@ -7186,7 +7765,7 @@ } ], "traits": { - "smithy.api#documentation": "

    Disables server-side encryption (SSE) for the delivery stream.

    \n

    This operation is asynchronous. It returns immediately. When you invoke it, Firehose first sets the encryption status of the stream to DISABLING, and then\n to DISABLED. You can continue to read and write data to your stream while its\n status is DISABLING. It can take up to 5 seconds after the encryption status\n changes to DISABLED before all records written to the delivery stream are no\n longer subject to encryption. To find out whether a record or a batch of records was\n encrypted, check the response elements PutRecordOutput$Encrypted and\n PutRecordBatchOutput$Encrypted, respectively.

    \n

    To check the encryption state of a delivery stream, use DescribeDeliveryStream.

    \n

    If SSE is enabled using a customer managed CMK and then you invoke\n StopDeliveryStreamEncryption, Firehose schedules the related\n KMS grant for retirement and then retires it after it ensures that it is finished\n delivering records to the destination.

    \n

    The StartDeliveryStreamEncryption and\n StopDeliveryStreamEncryption operations have a combined limit of 25 calls\n per delivery stream per 24 hours. For example, you reach the limit if you call\n StartDeliveryStreamEncryption 13 times and\n StopDeliveryStreamEncryption 12 times for the same delivery stream in a\n 24-hour period.

    " + "smithy.api#documentation": "

    Disables server-side encryption (SSE) for the Firehose stream.

    \n

    This operation is asynchronous. It returns immediately. When you invoke it, Firehose first sets the encryption status of the stream to DISABLING, and then\n to DISABLED. You can continue to read and write data to your stream while its\n status is DISABLING. It can take up to 5 seconds after the encryption status\n changes to DISABLED before all records written to the Firehose stream are no\n longer subject to encryption. To find out whether a record or a batch of records was\n encrypted, check the response elements PutRecordOutput$Encrypted and\n PutRecordBatchOutput$Encrypted, respectively.

    \n

    To check the encryption state of a Firehose stream, use DescribeDeliveryStream.

    \n

    If SSE is enabled using a customer managed CMK and then you invoke\n StopDeliveryStreamEncryption, Firehose schedules the related\n KMS grant for retirement and then retires it after it ensures that it is finished\n delivering records to the destination.

    \n

    The StartDeliveryStreamEncryption and\n StopDeliveryStreamEncryption operations have a combined limit of 25 calls\n per Firehose stream per 24 hours. For example, you reach the limit if you call\n StartDeliveryStreamEncryption 13 times and\n StopDeliveryStreamEncryption 12 times for the same Firehose stream in a\n 24-hour period.
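A matching sketch for the disable flow described above; the stream name is illustrative, and status polling works exactly as in the enable example:

```ts
import {
  FirehoseClient,
  StopDeliveryStreamEncryptionCommand,
} from "@aws-sdk/client-firehose";

const client = new FirehoseClient({});

// Asynchronous like the enable call: the status moves DISABLING -> DISABLED,
// observable via DescribeDeliveryStream.
await client.send(
  new StopDeliveryStreamEncryptionCommand({ DeliveryStreamName: "example-stream" })
);
```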

    " } }, "com.amazonaws.firehose#StopDeliveryStreamEncryptionInput": { @@ -7195,7 +7774,7 @@ "DeliveryStreamName": { "target": "com.amazonaws.firehose#DeliveryStreamName", "traits": { - "smithy.api#documentation": "

    The name of the delivery stream for which you want to disable server-side encryption\n (SSE).

    ", + "smithy.api#documentation": "

    The name of the Firehose stream for which you want to disable server-side encryption\n (SSE).

    ", "smithy.api#required": {} } } @@ -7211,6 +7790,16 @@ "smithy.api#output": {} } }, + "com.amazonaws.firehose#StringWithLettersDigitsUnderscoresDots": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 255 + }, + "smithy.api#pattern": "^[a-zA-Z0-9\\.\\_]+$" + } + }, "com.amazonaws.firehose#SubnetIdList": { "type": "list", "member": { @@ -7223,6 +7812,21 @@ } } }, + "com.amazonaws.firehose#TableCreationConfiguration": { + "type": "structure", + "members": { + "Enabled": { + "target": "com.amazonaws.firehose#BooleanObject", + "traits": { + "smithy.api#documentation": "

    \n

    \n

    Amazon Data Firehose is in preview release and is subject to change.

    ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

    \n

    \n

    Amazon Data Firehose is in preview release and is subject to change.
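Both preview shapes added in this diff are bare feature switches. A hedged sketch, assuming the generated client of this release exports them and that they are wired into an Iceberg-style destination configuration:

```ts
import type {
  SchemaEvolutionConfiguration,
  TableCreationConfiguration,
} from "@aws-sdk/client-firehose";

// Preview flags; both stay off unless explicitly enabled.
const schemaEvolution: SchemaEvolutionConfiguration = { Enabled: true };
const tableCreation: TableCreationConfiguration = { Enabled: true };
```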

    " + } + }, "com.amazonaws.firehose#Tag": { "type": "structure", "members": { @@ -7241,7 +7845,7 @@ } }, "traits": { - "smithy.api#documentation": "

    Metadata that you can assign to a delivery stream, consisting of a key-value\n pair.

    " + "smithy.api#documentation": "

    Metadata that you can assign to a Firehose stream, consisting of a key-value\n pair.

    " } }, "com.amazonaws.firehose#TagDeliveryStream": { @@ -7267,7 +7871,7 @@ } ], "traits": { - "smithy.api#documentation": "

    Adds or updates tags for the specified delivery stream. A tag is a key-value pair\n that you can define and assign to Amazon Web Services resources. If you specify a tag that\n already exists, the tag value is replaced with the value that you specify in the request.\n Tags are metadata. For example, you can add friendly names and descriptions or other types\n of information that can help you distinguish the delivery stream. For more information\n about tags, see Using Cost Allocation\n Tags in the Amazon Web Services Billing and Cost Management User\n Guide.

    \n

    Each delivery stream can have up to 50 tags.

    \n

    This operation has a limit of five transactions per second per account.

    " + "smithy.api#documentation": "

    Adds or updates tags for the specified Firehose stream. A tag is a key-value pair\n that you can define and assign to Amazon Web Services resources. If you specify a tag that\n already exists, the tag value is replaced with the value that you specify in the request.\n Tags are metadata. For example, you can add friendly names and descriptions or other types\n of information that can help you distinguish the Firehose stream. For more information\n about tags, see Using Cost Allocation\n Tags in the Amazon Web Services Billing and Cost Management User\n Guide.

    \n

    Each Firehose stream can have up to 50 tags.

    \n

    This operation has a limit of five transactions per second per account.
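A sketch of the tagging call documented above; names and values are illustrative, and re-tagging an existing key overwrites its value, as the documentation notes:

```ts
import { FirehoseClient, TagDeliveryStreamCommand } from "@aws-sdk/client-firehose";

const client = new FirehoseClient({});

await client.send(
  new TagDeliveryStreamCommand({
    DeliveryStreamName: "example-stream",        // illustrative
    Tags: [{ Key: "team", Value: "analytics" }], // up to 50 tags per stream
  })
);
```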

    " } }, "com.amazonaws.firehose#TagDeliveryStreamInput": { @@ -7276,7 +7880,7 @@ "DeliveryStreamName": { "target": "com.amazonaws.firehose#DeliveryStreamName", "traits": { - "smithy.api#documentation": "

    The name of the delivery stream to which you want to add the tags.

    ", + "smithy.api#documentation": "

    The name of the Firehose stream to which you want to add the tags.

    ", "smithy.api#required": {} } }, @@ -7379,7 +7983,7 @@ } ], "traits": { - "smithy.api#documentation": "

    Removes tags from the specified delivery stream. Removed tags are deleted, and you\n can't recover them after this operation successfully completes.

    \n

    If you specify a tag that doesn't exist, the operation ignores it.

    \n

    This operation has a limit of five transactions per second per account.

    " + "smithy.api#documentation": "

    Removes tags from the specified Firehose stream. Removed tags are deleted, and you\n can't recover them after this operation successfully completes.

    \n

    If you specify a tag that doesn't exist, the operation ignores it.

    \n

    This operation has a limit of five transactions per second per account.
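The companion untag call, sketched with illustrative values; removing a key that was never attached is a no-op, per the documentation above:

```ts
import { FirehoseClient, UntagDeliveryStreamCommand } from "@aws-sdk/client-firehose";

const client = new FirehoseClient({});

await client.send(
  new UntagDeliveryStreamCommand({
    DeliveryStreamName: "example-stream", // illustrative
    TagKeys: ["team"],
  })
);
```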

    " } }, "com.amazonaws.firehose#UntagDeliveryStreamInput": { @@ -7388,7 +7992,7 @@ "DeliveryStreamName": { "target": "com.amazonaws.firehose#DeliveryStreamName", "traits": { - "smithy.api#documentation": "

    The name of the delivery stream.

    ", + "smithy.api#documentation": "

    The name of the Firehose stream.

    ", "smithy.api#required": {} } }, @@ -7434,7 +8038,7 @@ } ], "traits": { - "smithy.api#documentation": "

    Updates the specified destination of the specified delivery stream.

    \n

    Use this operation to change the destination type (for example, to replace the Amazon\n S3 destination with Amazon Redshift) or change the parameters associated with a destination\n (for example, to change the bucket name of the Amazon S3 destination). The update might not\n occur immediately. The target delivery stream remains active while the configurations are\n updated, so data writes to the delivery stream can continue during this process. The\n updated configurations are usually effective within a few minutes.

    \n

    Switching between Amazon OpenSearch Service and other services is not supported. For\n an Amazon OpenSearch Service destination, you can only update to another Amazon OpenSearch\n Service destination.

    \n

    If the destination type is the same, Firehose merges the configuration\n parameters specified with the destination configuration that already exists on the delivery\n stream. If any of the parameters are not specified in the call, the existing values are\n retained. For example, in the Amazon S3 destination, if EncryptionConfiguration is not specified, then the existing\n EncryptionConfiguration is maintained on the destination.

    \n

    If the destination type is not the same, for example, changing the destination from\n Amazon S3 to Amazon Redshift, Firehose does not merge any parameters. In this\n case, all parameters must be specified.

    \n

    Firehose uses CurrentDeliveryStreamVersionId to avoid race\n conditions and conflicting merges. This is a required field, and the service updates the\n configuration only if the existing configuration has a version ID that matches. After the\n update is applied successfully, the version ID is updated, and can be retrieved using DescribeDeliveryStream. Use the new version ID to set\n CurrentDeliveryStreamVersionId in the next call.

    " + "smithy.api#documentation": "

    Updates the specified destination of the specified Firehose stream.

    \n

    Use this operation to change the destination type (for example, to replace the Amazon\n S3 destination with Amazon Redshift) or change the parameters associated with a destination\n (for example, to change the bucket name of the Amazon S3 destination). The update might not\n occur immediately. The target Firehose stream remains active while the configurations are\n updated, so data writes to the Firehose stream can continue during this process. The\n updated configurations are usually effective within a few minutes.

    \n

    Switching between Amazon OpenSearch Service and other services is not supported. For\n an Amazon OpenSearch Service destination, you can only update to another Amazon OpenSearch\n Service destination.

    \n

    If the destination type is the same, Firehose merges the configuration\n parameters specified with the destination configuration that already exists on the delivery\n stream. If any of the parameters are not specified in the call, the existing values are\n retained. For example, in the Amazon S3 destination, if EncryptionConfiguration is not specified, then the existing\n EncryptionConfiguration is maintained on the destination.

    \n

    If the destination type is not the same, for example, changing the destination from\n Amazon S3 to Amazon Redshift, Firehose does not merge any parameters. In this\n case, all parameters must be specified.

    \n

    Firehose uses CurrentDeliveryStreamVersionId to avoid race\n conditions and conflicting merges. This is a required field, and the service updates the\n configuration only if the existing configuration has a version ID that matches. After the\n update is applied successfully, the version ID is updated, and can be retrieved using DescribeDeliveryStream. Use the new version ID to set\n CurrentDeliveryStreamVersionId in the next call.
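The version-ID handshake described above is optimistic concurrency control. A sketch of the read-then-update sequence with the generated commands; the stream name and bucket ARN are illustrative:

```ts
import {
  DescribeDeliveryStreamCommand,
  FirehoseClient,
  UpdateDestinationCommand,
} from "@aws-sdk/client-firehose";

const client = new FirehoseClient({});

// Read the current version ID first; the service applies the update only if
// this ID still matches, which is the race-condition guard described above.
const { DeliveryStreamDescription: stream } = await client.send(
  new DescribeDeliveryStreamCommand({ DeliveryStreamName: "example-stream" }) // illustrative
);
if (!stream?.VersionId || !stream.Destinations?.[0]?.DestinationId) {
  throw new Error("stream description incomplete");
}

await client.send(
  new UpdateDestinationCommand({
    DeliveryStreamName: "example-stream",
    CurrentDeliveryStreamVersionId: stream.VersionId,
    DestinationId: stream.Destinations[0].DestinationId,
    ExtendedS3DestinationUpdate: {
      BucketARN: "arn:aws:s3:::example-new-bucket", // illustrative
    },
  })
);
```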

    " } }, "com.amazonaws.firehose#UpdateDestinationInput": { @@ -7443,7 +8047,7 @@ "DeliveryStreamName": { "target": "com.amazonaws.firehose#DeliveryStreamName", "traits": { - "smithy.api#documentation": "

    The name of the delivery stream.

    ", + "smithy.api#documentation": "

    The name of the Firehose stream.

    ", "smithy.api#required": {} } }, @@ -7519,7 +8123,7 @@ "IcebergDestinationUpdate": { "target": "com.amazonaws.firehose#IcebergDestinationUpdate", "traits": { - "smithy.api#documentation": "

    \n Describes an update for a destination in Apache Iceberg Tables.\n

    \n

    Amazon Data Firehose is in preview release and is subject to change.

    " + "smithy.api#documentation": "

    \n Describes an update for a destination in Apache Iceberg Tables.\n

    " } } }, @@ -7551,14 +8155,14 @@ "SubnetIds": { "target": "com.amazonaws.firehose#SubnetIdList", "traits": { - "smithy.api#documentation": "

-          "smithy.api#documentation": "The IDs of the subnets that you want Firehose to use to create ENIs in the\n VPC of the Amazon ES destination. Make sure that the routing tables and inbound and\n outbound rules allow traffic to flow from the subnets whose IDs are specified here to the\n subnets that have the destination Amazon ES endpoints. Firehose creates at\n least one ENI in each of the subnets that are specified here. Do not delete or modify these\n ENIs.\n\nThe number of ENIs that Firehose creates in the subnets specified here\n scales up and down automatically based on throughput. To enable Firehose to\n scale up the number of ENIs to match throughput, ensure that you have sufficient quota. To\n help you calculate the quota you need, assume that Firehose can create up to\n three ENIs for this delivery stream for each of the subnets specified here. For more\n information about ENI quota, see Network Interfaces\n in the Amazon VPC Quotas topic.",
+          "smithy.api#documentation": "The IDs of the subnets that you want Firehose to use to create ENIs in the\n VPC of the Amazon ES destination. Make sure that the routing tables and inbound and\n outbound rules allow traffic to flow from the subnets whose IDs are specified here to the\n subnets that have the destination Amazon ES endpoints. Firehose creates at\n least one ENI in each of the subnets that are specified here. Do not delete or modify these\n ENIs.\n\nThe number of ENIs that Firehose creates in the subnets specified here\n scales up and down automatically based on throughput. To enable Firehose to\n scale up the number of ENIs to match throughput, ensure that you have sufficient quota. To\n help you calculate the quota you need, assume that Firehose can create up to\n three ENIs for this Firehose stream for each of the subnets specified here. For more\n information about ENI quota, see Network Interfaces\n in the Amazon VPC Quotas topic.",
          "smithy.api#required": {}
        }
      },
      "RoleARN": {
        "target": "com.amazonaws.firehose#RoleARN",
        "traits": {

-          "smithy.api#documentation": "The ARN of the IAM role that you want the delivery stream to use to create endpoints in\n the destination VPC. You can use your existing Firehose delivery role or you\n can specify a new role. In either case, make sure that the role trusts the Firehose service principal and that it grants the following permissions:\n\n• ec2:DescribeVpcs\n• ec2:DescribeVpcAttribute\n• ec2:DescribeSubnets\n• ec2:DescribeSecurityGroups\n• ec2:DescribeNetworkInterfaces\n• ec2:CreateNetworkInterface\n• ec2:CreateNetworkInterfacePermission\n• ec2:DeleteNetworkInterface\n\nWhen you specify subnets for delivering data to the destination in a private VPC, make sure you have enough number of free IP addresses in chosen subnets. If there is no available free IP address in a specified subnet, Firehose cannot create or add ENIs for the data delivery in the private VPC, and the delivery will be degraded or fail.",
+          "smithy.api#documentation": "The ARN of the IAM role that you want the Firehose stream to use to create endpoints in\n the destination VPC. You can use your existing Firehose delivery role or you\n can specify a new role. In either case, make sure that the role trusts the Firehose service principal and that it grants the following permissions:\n\n• ec2:DescribeVpcs\n• ec2:DescribeVpcAttribute\n• ec2:DescribeSubnets\n• ec2:DescribeSecurityGroups\n• ec2:DescribeNetworkInterfaces\n• ec2:CreateNetworkInterface\n• ec2:CreateNetworkInterfacePermission\n• ec2:DeleteNetworkInterface\n\nWhen you specify subnets for delivering data to the destination in a private VPC, make sure you have a sufficient number of free IP addresses in the chosen subnets. If there is no available free IP address in a specified subnet, Firehose cannot create or add ENIs for the data delivery in the private VPC, and the delivery will be degraded or fail.",
          "smithy.api#required": {}
        }
      },

@@ -7580,21 +8184,21 @@
      "SubnetIds": {
        "target": "com.amazonaws.firehose#SubnetIdList",
        "traits": {
-          "smithy.api#documentation": "The IDs of the subnets that Firehose uses to create ENIs in the VPC of the\n Amazon ES destination. Make sure that the routing tables and inbound and outbound rules\n allow traffic to flow from the subnets whose IDs are specified here to the subnets that\n have the destination Amazon ES endpoints. Firehose creates at least one ENI in\n each of the subnets that are specified here. Do not delete or modify these ENIs.\n\nThe number of ENIs that Firehose creates in the subnets specified here\n scales up and down automatically based on throughput. To enable Firehose to\n scale up the number of ENIs to match throughput, ensure that you have sufficient quota. To\n help you calculate the quota you need, assume that Firehose can create up to\n three ENIs for this delivery stream for each of the subnets specified here. For more\n information about ENI quota, see Network Interfaces\n in the Amazon VPC Quotas topic.",
+          "smithy.api#documentation": "The IDs of the subnets that Firehose uses to create ENIs in the VPC of the\n Amazon ES destination. Make sure that the routing tables and inbound and outbound rules\n allow traffic to flow from the subnets whose IDs are specified here to the subnets that\n have the destination Amazon ES endpoints. Firehose creates at least one ENI in\n each of the subnets that are specified here. Do not delete or modify these ENIs.\n\nThe number of ENIs that Firehose creates in the subnets specified here\n scales up and down automatically based on throughput. To enable Firehose to\n scale up the number of ENIs to match throughput, ensure that you have sufficient quota. To\n help you calculate the quota you need, assume that Firehose can create up to\n three ENIs for this Firehose stream for each of the subnets specified here. For more\n information about ENI quota, see Network Interfaces\n in the Amazon VPC Quotas topic.",
          "smithy.api#required": {}
        }
      },
      "RoleARN": {
        "target": "com.amazonaws.firehose#RoleARN",
        "traits": {

-          "smithy.api#documentation": "The ARN of the IAM role that the delivery stream uses to create endpoints in the\n destination VPC. You can use your existing Firehose delivery role or you can\n specify a new role. In either case, make sure that the role trusts the Firehose service principal and that it grants the following permissions:\n\n• ec2:DescribeVpcs\n• ec2:DescribeVpcAttribute\n• ec2:DescribeSubnets\n• ec2:DescribeSecurityGroups\n• ec2:DescribeNetworkInterfaces\n• ec2:CreateNetworkInterface\n• ec2:CreateNetworkInterfacePermission\n• ec2:DeleteNetworkInterface\n\nIf you revoke these permissions after you create the delivery stream, Firehose can't scale out by creating more ENIs when necessary. You might therefore see a\n degradation in performance.",
+          "smithy.api#documentation": "The ARN of the IAM role that the Firehose stream uses to create endpoints in the\n destination VPC. You can use your existing Firehose delivery role or you can\n specify a new role. In either case, make sure that the role trusts the Firehose service principal and that it grants the following permissions:\n\n• ec2:DescribeVpcs\n• ec2:DescribeVpcAttribute\n• ec2:DescribeSubnets\n• ec2:DescribeSecurityGroups\n• ec2:DescribeNetworkInterfaces\n• ec2:CreateNetworkInterface\n• ec2:CreateNetworkInterfacePermission\n• ec2:DeleteNetworkInterface\n\nIf you revoke these permissions after you create the Firehose stream, Firehose can't scale out by creating more ENIs when necessary. You might therefore see a\n degradation in performance.",
          "smithy.api#required": {}
        }
      },
      "SecurityGroupIds": {
        "target": "com.amazonaws.firehose#SecurityGroupIdList",
        "traits": {

-          "smithy.api#documentation": "The IDs of the security groups that Firehose uses when it creates ENIs in\n the VPC of the Amazon ES destination. You can use the same security group that the Amazon\n ES domain uses or different ones. If you specify different security groups, ensure that\n they allow outbound HTTPS traffic to the Amazon ES domain's security group. Also ensure\n that the Amazon ES domain's security group allows HTTPS traffic from the security groups\n specified here. If you use the same security group for both your delivery stream and the\n Amazon ES domain, make sure the security group inbound rule allows HTTPS traffic. For more\n information about security group rules, see Security group\n rules in the Amazon VPC documentation.",
+          "smithy.api#documentation": "The IDs of the security groups that Firehose uses when it creates ENIs in\n the VPC of the Amazon ES destination. You can use the same security group that the Amazon\n ES domain uses or different ones. If you specify different security groups, ensure that\n they allow outbound HTTPS traffic to the Amazon ES domain's security group. Also ensure\n that the Amazon ES domain's security group allows HTTPS traffic from the security groups\n specified here. If you use the same security group for both your Firehose stream and the\n Amazon ES domain, make sure the security group inbound rule allows HTTPS traffic. For more\n information about security group rules, see Security group\n rules in the Amazon VPC documentation.",
          "smithy.api#required": {}
        }
      },
@@ -7609,6 +8213,26 @@
    "traits": {

      "smithy.api#documentation": "The details of the VPC of the Amazon ES destination."
    }
  },
+  "com.amazonaws.firehose#VpcEndpointServiceName": {
+    "type": "string",
+    "traits": {
+      "smithy.api#length": {
+        "min": 47,
+        "max": 255
+      },
+      "smithy.api#pattern": "^([a-zA-Z0-9\\-\\_]+\\.){2,3}vpce\\.[a-zA-Z0-9\\-]*\\.vpce-svc\\-[a-zA-Z0-9\\-]{17}$"
+    }
+  },
+  "com.amazonaws.firehose#WarehouseLocation": {
+    "type": "string",
+    "traits": {
+      "smithy.api#length": {
+        "min": 1,
+        "max": 2048
+      },
+      "smithy.api#pattern": "^s3:\\/\\/"
+    }
+  }
  }
}