From 796368e22db724f1a430e4c7a6e3a177b4e10d82 Mon Sep 17 00:00:00 2001 From: aws-sdk-go-automation <43143561+aws-sdk-go-automation@users.noreply.github.com> Date: Mon, 14 Feb 2022 11:14:51 -0800 Subject: [PATCH] Release v1.42.53 (2022-02-14) (#4277) Release v1.42.53 (2022-02-14) === ### Service Client Updates * `service/appflow`: Updates service API and documentation * `service/athena`: Updates service API and documentation * This release adds a subfield, ErrorType, to the AthenaError response object in the GetQueryExecution API when a query fails. * `service/rds`: Updates service API, documentation, waiters, paginators, and examples * Adds support for determining which Aurora PostgreSQL versions support Babelfish. --- CHANGELOG.md | 10 ++ aws/version.go | 2 +- models/apis/appflow/2020-08-23/api-2.json | 22 ++- models/apis/appflow/2020-08-23/docs-2.json | 20 ++- models/apis/athena/2017-05-18/api-2.json | 9 +- models/apis/athena/2017-05-18/docs-2.json | 12 +- models/apis/rds/2014-10-31/api-2.json | 6 +- models/apis/rds/2014-10-31/docs-2.json | 16 +- service/appflow/api.go | 179 +++++++++++++++++++++ service/athena/api.go | 26 +-- service/rds/api.go | 119 +++++++++++--- 11 files changed, 369 insertions(+), 52 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ae9db417774..9eeb2b27be5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,13 @@ +Release v1.42.53 (2022-02-14) +=== + +### Service Client Updates +* `service/appflow`: Updates service API and documentation +* `service/athena`: Updates service API and documentation + * This release adds a subfield, ErrorType, to the AthenaError response object in the GetQueryExecution API when a query fails. +* `service/rds`: Updates service API, documentation, waiters, paginators, and examples + * Adds support for determining which Aurora PostgreSQL versions support Babelfish. 
+ Release v1.42.52 (2022-02-11) === diff --git a/aws/version.go b/aws/version.go index c3ca9cb5fab..df7d622d1af 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.42.52" +const SDKVersion = "1.42.53" diff --git a/models/apis/appflow/2020-08-23/api-2.json b/models/apis/appflow/2020-08-23/api-2.json index c5e2ce867bc..e615787af08 100644 --- a/models/apis/appflow/2020-08-23/api-2.json +++ b/models/apis/appflow/2020-08-23/api-2.json @@ -1338,7 +1338,8 @@ "Honeycode":{"shape":"HoneycodeDestinationProperties"}, "CustomerProfiles":{"shape":"CustomerProfilesDestinationProperties"}, "Zendesk":{"shape":"ZendeskDestinationProperties"}, - "CustomConnector":{"shape":"CustomConnectorDestinationProperties"} + "CustomConnector":{"shape":"CustomConnectorDestinationProperties"}, + "SAPOData":{"shape":"SAPODataDestinationProperties"} } }, "DestinationField":{ @@ -2415,6 +2416,17 @@ "oAuthProperties":{"shape":"OAuthProperties"} } }, + "SAPODataDestinationProperties":{ + "type":"structure", + "required":["objectPath"], + "members":{ + "objectPath":{"shape":"Object"}, + "successResponseHandlingConfig":{"shape":"SuccessResponseHandlingConfig"}, + "idFieldNames":{"shape":"IdFieldNameList"}, + "errorHandlingConfig":{"shape":"ErrorHandlingConfig"}, + "writeOperationType":{"shape":"WriteOperationType"} + } + }, "SAPODataMetadata":{ "type":"structure", "members":{ @@ -2837,6 +2849,13 @@ "max":2048, "pattern":".*" }, + "SuccessResponseHandlingConfig":{ + "type":"structure", + "members":{ + "bucketPrefix":{"shape":"BucketPrefix"}, + "bucketName":{"shape":"BucketName"} + } + }, "SupportedApiVersion":{ "type":"string", "max":256, @@ -2937,6 +2956,7 @@ "Map_all", "Mask", "Merge", + "Passthrough", "Truncate", "Validate" ] diff --git a/models/apis/appflow/2020-08-23/docs-2.json b/models/apis/appflow/2020-08-23/docs-2.json index ab7643d236a..c3c714959a9 100644 --- a/models/apis/appflow/2020-08-23/docs-2.json +++ b/models/apis/appflow/2020-08-23/docs-2.json @@ -268,7 +268,8 @@ "S3DestinationProperties$bucketName": "

The Amazon S3 bucket name in which Amazon AppFlow places the transferred data.

", "S3SourceProperties$bucketName": "

The Amazon S3 bucket name where the source files are stored.

", "SnowflakeConnectorProfileProperties$bucketName": "

The name of the Amazon S3 bucket associated with Snowflake.

", - "SnowflakeDestinationProperties$intermediateBucketName": "

The intermediate bucket that Amazon AppFlow uses when moving data into Snowflake.

" + "SnowflakeDestinationProperties$intermediateBucketName": "

The intermediate bucket that Amazon AppFlow uses when moving data into Snowflake.

", + "SuccessResponseHandlingConfig$bucketName": "

The name of the Amazon S3 bucket.

" } }, "BucketPrefix": { @@ -281,6 +282,7 @@ "S3SourceProperties$bucketPrefix": "

The object key for the Amazon S3 bucket in which the source files are stored.

", "SnowflakeConnectorProfileProperties$bucketPrefix": "

The bucket path that refers to the Amazon S3 bucket associated with Snowflake.

", "SnowflakeDestinationProperties$bucketPrefix": "

The object key for the destination bucket in which Amazon AppFlow places the files.

", + "SuccessResponseHandlingConfig$bucketPrefix": "

The Amazon S3 bucket prefix.

", "UpsolverDestinationProperties$bucketPrefix": "

The object key for the destination Upsolver Amazon S3 bucket in which Amazon AppFlow places the files.

" } }, @@ -1008,6 +1010,7 @@ "EventBridgeDestinationProperties$errorHandlingConfig": null, "HoneycodeDestinationProperties$errorHandlingConfig": null, "RedshiftDestinationProperties$errorHandlingConfig": "

The settings that determine how Amazon AppFlow handles an error when placing data in the Amazon Redshift destination. For example, this setting would determine if the flow should fail after one insertion error, or continue and attempt to insert every record regardless of the initial failure. ErrorHandlingConfig is a part of the destination connector details.

", + "SAPODataDestinationProperties$errorHandlingConfig": null, "SalesforceDestinationProperties$errorHandlingConfig": "

The settings that determine how Amazon AppFlow handles an error when placing data in the Salesforce destination. For example, this setting would determine if the flow should fail after one insertion error, or continue and attempt to insert every record regardless of the initial failure. ErrorHandlingConfig is a part of the destination connector details.

", "SnowflakeDestinationProperties$errorHandlingConfig": "

The settings that determine how Amazon AppFlow handles an error when placing data in the Snowflake destination. For example, this setting would determine if the flow should fail after one insertion error, or continue and attempt to insert every record regardless of the initial failure. ErrorHandlingConfig is a part of the destination connector details.

", "ZendeskDestinationProperties$errorHandlingConfig": null @@ -1242,6 +1245,7 @@ "base": "

A list of field names that can be used as an ID field when performing a write operation.

", "refs": { "CustomConnectorDestinationProperties$idFieldNames": "

The name of the field that Amazon AppFlow uses as an ID when performing a write operation such as update, delete, or upsert.

", + "SAPODataDestinationProperties$idFieldNames": null, "SalesforceDestinationProperties$idFieldNames": "

The name of the field that Amazon AppFlow uses as an ID when performing a write operation such as update or delete.

", "ZendeskDestinationProperties$idFieldNames": null } @@ -1548,6 +1552,7 @@ "InforNexusSourceProperties$object": "

The object specified in the Infor Nexus flow source.

", "MarketoSourceProperties$object": "

The object specified in the Marketo flow source.

", "RedshiftDestinationProperties$object": "

The object specified in the Amazon Redshift flow destination.

", + "SAPODataDestinationProperties$objectPath": "

The object path specified in the SAPOData flow destination.

", "SAPODataSourceProperties$objectPath": "

The object path specified in the SAPOData flow source.

", "SalesforceDestinationProperties$object": "

The object specified in the Salesforce flow destination.

", "SalesforceSourceProperties$object": "

The object specified in the Salesforce flow source.

", @@ -1824,6 +1829,12 @@ "ConnectorProfileProperties$SAPOData": null } }, + "SAPODataDestinationProperties": { + "base": "

The properties that are applied when using SAPOData as a flow destination.

", + "refs": { + "DestinationConnectorProperties$SAPOData": "

The properties required to query SAPOData.

" + } + }, "SAPODataMetadata": { "base": "

The connector metadata specific to SAPOData.

", "refs": { @@ -2088,6 +2099,12 @@ "SourceFields$member": null } }, + "SuccessResponseHandlingConfig": { + "base": "

Determines how Amazon AppFlow handles the success response that it gets from the connector after placing data.

For example, this setting would determine where to write the response from the destination connector upon a successful insert operation.

", + "refs": { + "SAPODataDestinationProperties$successResponseHandlingConfig": "

Determines how Amazon AppFlow handles the success response that it gets from the connector after placing data.

For example, this setting would determine where to write the response from a destination connector upon a successful insert operation.

" + } + }, "SupportedApiVersion": { "base": null, "refs": { @@ -2411,6 +2428,7 @@ "base": "

The possible write operations in the destination connector. When this value is not provided, this defaults to the INSERT operation.

", "refs": { "CustomConnectorDestinationProperties$writeOperationType": "

Specifies the type of write operation to be performed in the custom connector when it's used as destination.

", + "SAPODataDestinationProperties$writeOperationType": null, "SalesforceDestinationProperties$writeOperationType": "

This specifies the type of write operation to be performed in Salesforce. When the value is UPSERT, then idFieldNames is required.

", "SupportedWriteOperationList$member": null, "ZendeskDestinationProperties$writeOperationType": null diff --git a/models/apis/athena/2017-05-18/api-2.json b/models/apis/athena/2017-05-18/api-2.json index cf5e62c8e80..22a7133cfe5 100644 --- a/models/apis/athena/2017-05-18/api-2.json +++ b/models/apis/athena/2017-05-18/api-2.json @@ -480,7 +480,8 @@ "AthenaError":{ "type":"structure", "members":{ - "ErrorCategory":{"shape":"ErrorCategory"} + "ErrorCategory":{"shape":"ErrorCategory"}, + "ErrorType":{"shape":"ErrorType"} } }, "BatchGetNamedQueryInput":{ @@ -810,6 +811,12 @@ "min":1 }, "ErrorMessage":{"type":"string"}, + "ErrorType":{ + "type":"integer", + "box":true, + "max":9999, + "min":0 + }, "ExpressionString":{ "type":"string", "max":256, diff --git a/models/apis/athena/2017-05-18/docs-2.json b/models/apis/athena/2017-05-18/docs-2.json index 17ff0e4d24a..670c42e236f 100644 --- a/models/apis/athena/2017-05-18/docs-2.json +++ b/models/apis/athena/2017-05-18/docs-2.json @@ -17,7 +17,7 @@ "GetNamedQuery": "

Returns information about a single query. Requires that you have access to the workgroup in which the query was saved.

", "GetPreparedStatement": "

Retrieves the prepared statement with the specified name from the specified workgroup.

", "GetQueryExecution": "

Returns information about a single execution of a query if you have access to the workgroup in which the query ran. Each time a query executes, information about the query execution is saved with a unique ID.

", - "GetQueryResults": "

Streams the results of a single query execution specified by QueryExecutionId from the Athena query results location in Amazon S3. For more information, see Query Results in the Amazon Athena User Guide. This request does not execute the query but returns results. Use StartQueryExecution to run a query.

If the original query execution ran using an ResultConfiguration$ExpectedBucketOwner setting, the setting also applies to Amazon S3 read operations when GetQueryResults is called. If an expected bucket owner has been specified and the query results are in an Amazon S3 bucket whose owner account ID is different from the expected bucket owner, the GetQueryResults call fails with an Amazon S3 permissions error.

To stream query results successfully, the IAM principal with permission to call GetQueryResults also must have permissions to the Amazon S3 GetObject action for the Athena query results location.

IAM principals with permission to the Amazon S3 GetObject action for the query results location are able to retrieve query results from Amazon S3 even if permission to the GetQueryResults action is denied. To restrict user or role access, ensure that Amazon S3 permissions to the Athena query location are denied.

", + "GetQueryResults": "

Streams the results of a single query execution specified by QueryExecutionId from the Athena query results location in Amazon S3. For more information, see Query Results in the Amazon Athena User Guide. This request does not execute the query but returns results. Use StartQueryExecution to run a query.

To stream query results successfully, the IAM principal with permission to call GetQueryResults also must have permissions to the Amazon S3 GetObject action for the Athena query results location.

IAM principals with permission to the Amazon S3 GetObject action for the query results location are able to retrieve query results from Amazon S3 even if permission to the GetQueryResults action is denied. To restrict user or role access, ensure that Amazon S3 permissions to the Athena query location are denied.

", "GetTableMetadata": "

Returns table metadata for the specified catalog, database, and table.

", "GetWorkGroup": "

Returns information about the workgroup with the specified name.

", "ListDataCatalogs": "

Lists the data catalogs in the current Amazon Web Services account.

", @@ -83,8 +83,8 @@ "base": null, "refs": { "DeleteWorkGroupInput$RecursiveDeleteOption": "

The option to delete the workgroup and its contents even if the workgroup contains any named queries or query executions.

", - "ResultConfigurationUpdates$RemoveOutputLocation": "

If set to \"true\", indicates that the previously-specified query results location (also known as a client-side setting) for queries in this workgroup should be ignored and set to null. If set to \"false\" or not set, and a value is present in the OutputLocation in ResultConfigurationUpdates (the client-side setting), the OutputLocation in the workgroup's ResultConfiguration is updated with the new value. For more information, see Workgroup Settings Override Client-Side Settings.

", - "ResultConfigurationUpdates$RemoveEncryptionConfiguration": "

If set to \"true\", indicates that the previously-specified encryption configuration (also known as the client-side setting) for queries in this workgroup should be ignored and set to null. If set to \"false\" or not set, and a value is present in the EncryptionConfiguration in ResultConfigurationUpdates (the client-side setting), the EncryptionConfiguration in the workgroup's ResultConfiguration is updated with the new value. For more information, see Workgroup Settings Override Client-Side Settings.

", + "ResultConfigurationUpdates$RemoveOutputLocation": "

If set to \"true\", indicates that the previously-specified query results location (also known as a client-side setting) for queries in this workgroup should be ignored and set to null. If set to \"false\" or not set, and a value is present in the OutputLocation in ResultConfigurationUpdates (the client-side setting), the OutputLocation in the workgroup's ResultConfiguration will be updated with the new value. For more information, see Workgroup Settings Override Client-Side Settings.

", + "ResultConfigurationUpdates$RemoveEncryptionConfiguration": "

If set to \"true\", indicates that the previously-specified encryption configuration (also known as the client-side setting) for queries in this workgroup should be ignored and set to null. If set to \"false\" or not set, and a value is present in the EncryptionConfiguration in ResultConfigurationUpdates (the client-side setting), the EncryptionConfiguration in the workgroup's ResultConfiguration will be updated with the new value. For more information, see Workgroup Settings Override Client-Side Settings.

", "ResultConfigurationUpdates$RemoveExpectedBucketOwner": "

If set to \"true\", removes the Amazon Web Services account ID previously specified for ResultConfiguration$ExpectedBucketOwner. If set to \"false\" or not set, and a value is present in the ExpectedBucketOwner in ResultConfigurationUpdates (the client-side setting), the ExpectedBucketOwner in the workgroup's ResultConfiguration is updated with the new value. For more information, see Workgroup Settings Override Client-Side Settings.

", "WorkGroupConfiguration$EnforceWorkGroupConfiguration": "

If set to \"true\", the settings for the workgroup override client-side settings. If set to \"false\", client-side settings are used. For more information, see Workgroup Settings Override Client-Side Settings.

", "WorkGroupConfiguration$PublishCloudWatchMetricsEnabled": "

Indicates that the Amazon CloudWatch metrics are enabled for the workgroup.

", @@ -369,6 +369,12 @@ "UnprocessedQueryExecutionId$ErrorMessage": "

The error message returned when the query execution failed to process, if applicable.

" } }, + "ErrorType": { + "base": null, + "refs": { + "AthenaError$ErrorType": "

An integer value that provides specific information about an Athena query error. For the meaning of specific values, see the Error Type Reference in the Amazon Athena User Guide.

" + } + }, "ExpressionString": { "base": null, "refs": { diff --git a/models/apis/rds/2014-10-31/api-2.json b/models/apis/rds/2014-10-31/api-2.json index 4526d52b17f..7f64b477e30 100644 --- a/models/apis/rds/2014-10-31/api-2.json +++ b/models/apis/rds/2014-10-31/api-2.json @@ -3969,7 +3969,8 @@ "DBEngineVersionArn":{"shape":"String"}, "KMSKeyId":{"shape":"String"}, "CreateTime":{"shape":"TStamp"}, - "TagList":{"shape":"TagList"} + "TagList":{"shape":"TagList"}, + "SupportsBabelfish":{"shape":"Boolean"} } }, "DBEngineVersionList":{ @@ -8388,7 +8389,8 @@ "IsMajorVersionUpgrade":{"shape":"Boolean"}, "SupportedEngineModes":{"shape":"EngineModeList"}, "SupportsParallelQuery":{"shape":"BooleanOptional"}, - "SupportsGlobalDatabases":{"shape":"BooleanOptional"} + "SupportsGlobalDatabases":{"shape":"BooleanOptional"}, + "SupportsBabelfish":{"shape":"BooleanOptional"} } }, "UserAuthConfig":{ diff --git a/models/apis/rds/2014-10-31/docs-2.json b/models/apis/rds/2014-10-31/docs-2.json index 7ee4ca80f85..316d880618a 100644 --- a/models/apis/rds/2014-10-31/docs-2.json +++ b/models/apis/rds/2014-10-31/docs-2.json @@ -344,6 +344,7 @@ "DBEngineVersion$SupportsReadReplica": "

Indicates whether the database engine version supports read replicas.

", "DBEngineVersion$SupportsParallelQuery": "

A value that indicates whether you can use Aurora parallel query with a specific DB engine version.

", "DBEngineVersion$SupportsGlobalDatabases": "

A value that indicates whether you can use Aurora global databases with a specific DB engine version.

", + "DBEngineVersion$SupportsBabelfish": "

A value that indicates whether the engine version supports Babelfish for Aurora PostgreSQL.

", "DBInstance$MultiAZ": "

Specifies if the DB instance is a Multi-AZ deployment. This setting doesn't apply to RDS Custom.

", "DBInstance$AutoMinorVersionUpgrade": "

A value that indicates that minor version patches are applied automatically.

", "DBInstance$PubliclyAccessible": "

Specifies the accessibility options for the DB instance.

When the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB cluster's virtual private cloud (VPC). It resolves to the public IP address from outside of the DB cluster's VPC. Access to the DB cluster is ultimately controlled by the security group it uses. That public access isn't permitted if the security group assigned to the DB cluster doesn't permit it.

When the DB instance isn't publicly accessible, it is an internal DB instance with a DNS name that resolves to a private IP address.

For more information, see CreateDBInstance.

", @@ -546,7 +547,8 @@ "StartActivityStreamResponse$EngineNativeAuditFieldsIncluded": "

Indicates whether engine-native audit fields are included in the database activity stream.

", "StopActivityStreamRequest$ApplyImmediately": "

Specifies whether or not the database activity stream is to stop as soon as possible, regardless of the maintenance window for the database.

", "UpgradeTarget$SupportsParallelQuery": "

A value that indicates whether you can use Aurora parallel query with the target engine version.

", - "UpgradeTarget$SupportsGlobalDatabases": "

A value that indicates whether you can use Aurora global databases with the target engine version.

" + "UpgradeTarget$SupportsGlobalDatabases": "

A value that indicates whether you can use Aurora global databases with the target engine version.

", + "UpgradeTarget$SupportsBabelfish": "

A value that indicates whether you can use Babelfish for Aurora PostgreSQL with the target engine version.

" } }, "BucketName": { @@ -2305,10 +2307,10 @@ "DescribeDBClusterParameterGroupsMessage$Filters": "

This parameter isn't currently supported.

", "DescribeDBClusterParametersMessage$Filters": "

This parameter isn't currently supported.

", "DescribeDBClusterSnapshotsMessage$Filters": "

A filter that specifies one or more DB cluster snapshots to describe.

Supported filters:

", - "DescribeDBClustersMessage$Filters": "

A filter that specifies one or more DB clusters to describe.

Supported filters:

", - "DescribeDBEngineVersionsMessage$Filters": "

This parameter isn't currently supported.

", + "DescribeDBClustersMessage$Filters": "

A filter that specifies one or more DB clusters to describe.

Supported filters:

", + "DescribeDBEngineVersionsMessage$Filters": "

A filter that specifies one or more DB engine versions to describe.

Supported filters:

", "DescribeDBInstanceAutomatedBackupsMessage$Filters": "

A filter that specifies which resources to return based on status.

Supported filters are the following:

Returns all resources by default. The status for each resource is specified in the response.

", - "DescribeDBInstancesMessage$Filters": "

A filter that specifies one or more DB instances to describe.

Supported filters:

", + "DescribeDBInstancesMessage$Filters": "

A filter that specifies one or more DB instances to describe.

Supported filters:

", "DescribeDBLogFilesMessage$Filters": "

This parameter isn't currently supported.

", "DescribeDBParameterGroupsMessage$Filters": "

This parameter isn't currently supported.

", "DescribeDBParametersMessage$Filters": "

This parameter isn't currently supported.

", @@ -2330,7 +2332,7 @@ "DescribeOptionGroupOptionsMessage$Filters": "

This parameter isn't currently supported.

", "DescribeOptionGroupsMessage$Filters": "

This parameter isn't currently supported.

", "DescribeOrderableDBInstanceOptionsMessage$Filters": "

This parameter isn't currently supported.

", - "DescribePendingMaintenanceActionsMessage$Filters": "

A filter that specifies one or more resources to return pending maintenance actions for.

Supported filters:

", + "DescribePendingMaintenanceActionsMessage$Filters": "

A filter that specifies one or more resources to return pending maintenance actions for.

Supported filters:

", "DescribeReservedDBInstancesMessage$Filters": "

This parameter isn't currently supported.

", "DescribeReservedDBInstancesOfferingsMessage$Filters": "

This parameter isn't currently supported.

", "DescribeSourceRegionsMessage$Filters": "

This parameter isn't currently supported.

", @@ -4359,7 +4361,7 @@ "ModifyDBClusterMessage$PreferredBackupWindow": "

The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter.

The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region. To view the time blocks available, see Backup window in the Amazon Aurora User Guide.

Constraints:

Valid for: Aurora DB clusters and Multi-AZ DB clusters

", "ModifyDBClusterMessage$PreferredMaintenanceWindow": "

The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).

Format: ddd:hh24:mi-ddd:hh24:mi

The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region, occurring on a random day of the week. To see the time blocks available, see Adjusting the Preferred DB Cluster Maintenance Window in the Amazon Aurora User Guide.

Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun.

Constraints: Minimum 30-minute window.

Valid for: Aurora DB clusters and Multi-AZ DB clusters

", "ModifyDBClusterMessage$EngineVersion": "

The version number of the database engine to which you want to upgrade. Changing this parameter results in an outage. The change is applied during the next maintenance window unless ApplyImmediately is enabled.

To list all of the available engine versions for MySQL 5.6-compatible Aurora, use the following command:

aws rds describe-db-engine-versions --engine aurora --query \"DBEngineVersions[].EngineVersion\"

To list all of the available engine versions for MySQL 5.7-compatible and MySQL 8.0-compatible Aurora, use the following command:

aws rds describe-db-engine-versions --engine aurora-mysql --query \"DBEngineVersions[].EngineVersion\"

To list all of the available engine versions for Aurora PostgreSQL, use the following command:

aws rds describe-db-engine-versions --engine aurora-postgresql --query \"DBEngineVersions[].EngineVersion\"

To list all of the available engine versions for RDS for MySQL, use the following command:

aws rds describe-db-engine-versions --engine mysql --query \"DBEngineVersions[].EngineVersion\"

To list all of the available engine versions for RDS for PostgreSQL, use the following command:

aws rds describe-db-engine-versions --engine postgres --query \"DBEngineVersions[].EngineVersion\"

Valid for: Aurora DB clusters and Multi-AZ DB clusters

", - "ModifyDBClusterMessage$DBInstanceParameterGroupName": "

The name of the DB parameter group to apply to all instances of the DB cluster.

When you apply a parameter group using the DBInstanceParameterGroupName parameter, the DB cluster isn't rebooted automatically. Also, parameter changes aren't applied during the next maintenance window but instead are applied immediately.

Default: The existing name setting

Constraints:

Valid for: Aurora DB clusters only

", + "ModifyDBClusterMessage$DBInstanceParameterGroupName": "

The name of the DB parameter group to apply to all instances of the DB cluster.

When you apply a parameter group using the DBInstanceParameterGroupName parameter, the DB cluster isn't rebooted automatically. Also, parameter changes are applied immediately rather than during the next maintenance window.

Default: The existing name setting

Constraints:

Valid for: Aurora DB clusters only

", "ModifyDBClusterMessage$Domain": "

The Active Directory directory ID to move the DB cluster to. Specify none to remove the cluster from its current domain. The domain must be created prior to this operation.

For more information, see Kerberos Authentication in the Amazon Aurora User Guide.

Valid for: Aurora DB clusters only

", "ModifyDBClusterMessage$DomainIAMRoleName": "

Specify the name of the IAM role to be used when making API calls to the Directory Service.

Valid for: Aurora DB clusters only

", "ModifyDBClusterMessage$DBClusterInstanceClass": "

The compute and memory capacity of each DB instance in the Multi-AZ DB cluster, for example db.m6g.xlarge. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines.

For the full list of DB instance classes and availability for your engine, see DB Instance Class in the Amazon RDS User Guide.

Valid for: Multi-AZ DB clusters only

", @@ -4551,7 +4553,7 @@ "RestoreDBClusterFromSnapshotMessage$DBClusterParameterGroupName": "

The name of the DB cluster parameter group to associate with this DB cluster. If this argument is omitted, the default DB cluster parameter group for the specified engine is used.

Constraints:

Valid for: Aurora DB clusters and Multi-AZ DB clusters

", "RestoreDBClusterFromSnapshotMessage$Domain": "

Specify the Active Directory directory ID to restore the DB cluster in. The domain must be created prior to this operation. Currently, only MySQL, Microsoft SQL Server, Oracle, and PostgreSQL DB instances can be created in an Active Directory Domain.

For more information, see Kerberos Authentication in the Amazon RDS User Guide.

Valid for: Aurora DB clusters only

", "RestoreDBClusterFromSnapshotMessage$DomainIAMRoleName": "

Specify the name of the IAM role to be used when making API calls to the Directory Service.

Valid for: Aurora DB clusters only

", - "RestoreDBClusterFromSnapshotMessage$DBClusterInstanceClass": "

The compute and memory capacity of the each DB instance in the Multi-AZ DB cluster, for example db.m6g.xlarge. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines.

For the full list of DB instance classes, and availability for your engine, see DB Instance Class in the Amazon RDS User Guide.

Valid for: Aurora DB clusters and Multi-AZ DB clusters

", + "RestoreDBClusterFromSnapshotMessage$DBClusterInstanceClass": "

The compute and memory capacity of each DB instance in the Multi-AZ DB cluster, for example db.m6g.xlarge. Not all DB instance classes are available in all Amazon Web Services Regions, or for all database engines.

For the full list of DB instance classes and availability for your engine, see DB Instance Class in the Amazon RDS User Guide.

Valid for: Multi-AZ DB clusters only

", "RestoreDBClusterFromSnapshotMessage$StorageType": "

Specifies the storage type to be associated with each DB instance in the Multi-AZ DB cluster.

Valid values: io1

When specified, a value for the Iops parameter is required.

Default: io1

Valid for: Aurora DB clusters and Multi-AZ DB clusters

", "RestoreDBClusterToPointInTimeMessage$DBClusterIdentifier": "

The name of the new DB cluster to be created.

Constraints:

Valid for: Aurora DB clusters and Multi-AZ DB clusters

", "RestoreDBClusterToPointInTimeMessage$RestoreType": "

The type of restore to be performed. You can specify one of the following values:

Constraints: You can't specify copy-on-write if the engine version of the source DB cluster is earlier than 1.11.

If you don't specify a RestoreType value, then the new DB cluster is restored as a full copy of the source DB cluster.

Valid for: Aurora DB clusters and Multi-AZ DB clusters

", diff --git a/service/appflow/api.go b/service/appflow/api.go index b8653f8505b..2905428965e 100644 --- a/service/appflow/api.go +++ b/service/appflow/api.go @@ -6906,6 +6906,9 @@ type DestinationConnectorProperties struct { // The properties required to query Amazon S3. S3 *S3DestinationProperties `type:"structure"` + // The properties required to query SAPOData. + SAPOData *SAPODataDestinationProperties `type:"structure"` + // The properties required to query Salesforce. Salesforce *SalesforceDestinationProperties `type:"structure"` @@ -6970,6 +6973,11 @@ func (s *DestinationConnectorProperties) Validate() error { invalidParams.AddNested("S3", err.(request.ErrInvalidParams)) } } + if s.SAPOData != nil { + if err := s.SAPOData.Validate(); err != nil { + invalidParams.AddNested("SAPOData", err.(request.ErrInvalidParams)) + } + } if s.Salesforce != nil { if err := s.Salesforce.Validate(); err != nil { invalidParams.AddNested("Salesforce", err.(request.ErrInvalidParams)) @@ -7039,6 +7047,12 @@ func (s *DestinationConnectorProperties) SetS3(v *S3DestinationProperties) *Dest return s } +// SetSAPOData sets the SAPOData field's value. +func (s *DestinationConnectorProperties) SetSAPOData(v *SAPODataDestinationProperties) *DestinationConnectorProperties { + s.SAPOData = v + return s +} + // SetSalesforce sets the Salesforce field's value. func (s *DestinationConnectorProperties) SetSalesforce(v *SalesforceDestinationProperties) *DestinationConnectorProperties { s.Salesforce = v @@ -10767,6 +10781,109 @@ func (s *SAPODataConnectorProfileProperties) SetPrivateLinkServiceName(v string) return s } +// The properties that are applied when using SAPOData as a flow destination +type SAPODataDestinationProperties struct { + _ struct{} `type:"structure"` + + // The settings that determine how Amazon AppFlow handles an error when placing + // data in the destination. For example, this setting would determine if the + // flow should fail after one insertion error, or continue and attempt to insert + // every record regardless of the initial failure. ErrorHandlingConfig is a + // part of the destination connector details. + ErrorHandlingConfig *ErrorHandlingConfig `locationName:"errorHandlingConfig" type:"structure"` + + // A list of field names that can be used as an ID field when performing a write + // operation. + IdFieldNames []*string `locationName:"idFieldNames" type:"list"` + + // The object path specified in the SAPOData flow destination. + // + // ObjectPath is a required field + ObjectPath *string `locationName:"objectPath" type:"string" required:"true"` + + // Determines how Amazon AppFlow handles the success response that it gets from + // the connector after placing data. + // + // For example, this setting would determine where to write the response from + // a destination connector upon a successful insert operation. + SuccessResponseHandlingConfig *SuccessResponseHandlingConfig `locationName:"successResponseHandlingConfig" type:"structure"` + + // The possible write operations in the destination connector. When this value + // is not provided, this defaults to the INSERT operation. + WriteOperationType *string `locationName:"writeOperationType" type:"string" enum:"WriteOperationType"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s SAPODataDestinationProperties) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SAPODataDestinationProperties) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SAPODataDestinationProperties) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SAPODataDestinationProperties"} + if s.ObjectPath == nil { + invalidParams.Add(request.NewErrParamRequired("ObjectPath")) + } + if s.ErrorHandlingConfig != nil { + if err := s.ErrorHandlingConfig.Validate(); err != nil { + invalidParams.AddNested("ErrorHandlingConfig", err.(request.ErrInvalidParams)) + } + } + if s.SuccessResponseHandlingConfig != nil { + if err := s.SuccessResponseHandlingConfig.Validate(); err != nil { + invalidParams.AddNested("SuccessResponseHandlingConfig", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetErrorHandlingConfig sets the ErrorHandlingConfig field's value. +func (s *SAPODataDestinationProperties) SetErrorHandlingConfig(v *ErrorHandlingConfig) *SAPODataDestinationProperties { + s.ErrorHandlingConfig = v + return s +} + +// SetIdFieldNames sets the IdFieldNames field's value. +func (s *SAPODataDestinationProperties) SetIdFieldNames(v []*string) *SAPODataDestinationProperties { + s.IdFieldNames = v + return s +} + +// SetObjectPath sets the ObjectPath field's value. +func (s *SAPODataDestinationProperties) SetObjectPath(v string) *SAPODataDestinationProperties { + s.ObjectPath = &v + return s +} + +// SetSuccessResponseHandlingConfig sets the SuccessResponseHandlingConfig field's value. +func (s *SAPODataDestinationProperties) SetSuccessResponseHandlingConfig(v *SuccessResponseHandlingConfig) *SAPODataDestinationProperties { + s.SuccessResponseHandlingConfig = v + return s +} + +// SetWriteOperationType sets the WriteOperationType field's value. +func (s *SAPODataDestinationProperties) SetWriteOperationType(v string) *SAPODataDestinationProperties { + s.WriteOperationType = &v + return s +} + // The connector metadata specific to SAPOData. type SAPODataMetadata struct { _ struct{} `type:"structure"` @@ -12734,6 +12851,64 @@ func (s *StopFlowOutput) SetFlowStatus(v string) *StopFlowOutput { return s } +// Determines how Amazon AppFlow handles the success response that it gets from +// the connector after placing data. +// +// For example, this setting would determine where to write the response from +// the destination connector upon a successful insert operation. +type SuccessResponseHandlingConfig struct { + _ struct{} `type:"structure"` + + // The name of the Amazon S3 bucket. + BucketName *string `locationName:"bucketName" min:"3" type:"string"` + + // The Amazon S3 bucket prefix. + BucketPrefix *string `locationName:"bucketPrefix" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SuccessResponseHandlingConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SuccessResponseHandlingConfig) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SuccessResponseHandlingConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SuccessResponseHandlingConfig"} + if s.BucketName != nil && len(*s.BucketName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("BucketName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucketName sets the BucketName field's value. +func (s *SuccessResponseHandlingConfig) SetBucketName(v string) *SuccessResponseHandlingConfig { + s.BucketName = &v + return s +} + +// SetBucketPrefix sets the BucketPrefix field's value. +func (s *SuccessResponseHandlingConfig) SetBucketPrefix(v string) *SuccessResponseHandlingConfig { + s.BucketPrefix = &v + return s +} + // Contains details regarding all the supported FieldTypes and their corresponding // filterOperators and supportedValues. type SupportedFieldTypeDetails struct { @@ -16045,6 +16220,9 @@ const ( // TaskTypeMerge is a TaskType enum value TaskTypeMerge = "Merge" + // TaskTypePassthrough is a TaskType enum value + TaskTypePassthrough = "Passthrough" + // TaskTypeTruncate is a TaskType enum value TaskTypeTruncate = "Truncate" @@ -16061,6 +16239,7 @@ func TaskType_Values() []string { TaskTypeMapAll, TaskTypeMask, TaskTypeMerge, + TaskTypePassthrough, TaskTypeTruncate, TaskTypeValidate, } diff --git a/service/athena/api.go b/service/athena/api.go index cdf48897977..20416c74e85 100644 --- a/service/athena/api.go +++ b/service/athena/api.go @@ -1380,13 +1380,6 @@ func (c *Athena) GetQueryResultsRequest(input *GetQueryResultsInput) (req *reque // in the Amazon Athena User Guide. This request does not execute the query // but returns results. Use StartQueryExecution to run a query. // -// If the original query execution ran using an ResultConfiguration$ExpectedBucketOwner -// setting, the setting also applies to Amazon S3 read operations when GetQueryResults -// is called. If an expected bucket owner has been specified and the query results -// are in an Amazon S3 bucket whose owner account ID is different from the expected -// bucket owner, the GetQueryResults call fails with an Amazon S3 permissions -// error. -// // To stream query results successfully, the IAM principal with permission to // call GetQueryResults also must have permissions to the Amazon S3 GetObject // action for the Athena query results location. @@ -3558,6 +3551,11 @@ type AthenaError struct { // // 3 - Unknown ErrorCategory *int64 `min:"1" type:"integer"` + + // An integer value that provides specific information about an Athena query + // error. For the meaning of specific values, see the Error Type Reference (https://docs.aws.amazon.com/athena/latest/ug/error-reference.html#error-reference-error-type-reference) + // in the Amazon Athena User Guide. + ErrorType *int64 `type:"integer"` } // String returns the string representation. @@ -3584,6 +3582,12 @@ func (s *AthenaError) SetErrorCategory(v int64) *AthenaError { return s } +// SetErrorType sets the ErrorType field's value. 
+func (s *AthenaError) SetErrorType(v int64) *AthenaError { + s.ErrorType = &v + return s +} + type BatchGetNamedQueryInput struct { _ struct{} `type:"structure"` @@ -7800,7 +7804,7 @@ type ResultConfigurationUpdates struct { // be ignored and set to null. If set to "false" or not set, and a value is // present in the EncryptionConfiguration in ResultConfigurationUpdates (the // client-side setting), the EncryptionConfiguration in the workgroup's ResultConfiguration - // is updated with the new value. For more information, see Workgroup Settings + // will be updated with the new value. For more information, see Workgroup Settings // Override Client-Side Settings (https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html). RemoveEncryptionConfiguration *bool `type:"boolean"` @@ -7816,9 +7820,9 @@ type ResultConfigurationUpdates struct { // (also known as a client-side setting) for queries in this workgroup should // be ignored and set to null. If set to "false" or not set, and a value is // present in the OutputLocation in ResultConfigurationUpdates (the client-side - // setting), the OutputLocation in the workgroup's ResultConfiguration is updated - // with the new value. For more information, see Workgroup Settings Override - // Client-Side Settings (https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html). + // setting), the OutputLocation in the workgroup's ResultConfiguration will + // be updated with the new value. For more information, see Workgroup Settings + // Override Client-Side Settings (https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html). RemoveOutputLocation *bool `type:"boolean"` } diff --git a/service/rds/api.go b/service/rds/api.go index b25c5844e0f..0f61c93a93b 100644 --- a/service/rds/api.go +++ b/service/rds/api.go @@ -18638,6 +18638,10 @@ type CreateCustomDBEngineVersionOutput struct { // of the CreateDBInstance action. SupportedTimezones []*Timezone `locationNameList:"Timezone" type:"list"` + // A value that indicates whether the engine version supports Babelfish for + // Aurora PostgreSQL. + SupportsBabelfish *bool `type:"boolean"` + // A value that indicates whether you can use Aurora global databases with a // specific DB engine version. SupportsGlobalDatabases *bool `type:"boolean"` @@ -18794,6 +18798,12 @@ func (s *CreateCustomDBEngineVersionOutput) SetSupportedTimezones(v []*Timezone) return s } +// SetSupportsBabelfish sets the SupportsBabelfish field's value. +func (s *CreateCustomDBEngineVersionOutput) SetSupportsBabelfish(v bool) *CreateCustomDBEngineVersionOutput { + s.SupportsBabelfish = &v + return s +} + // SetSupportsGlobalDatabases sets the SupportsGlobalDatabases field's value. func (s *CreateCustomDBEngineVersionOutput) SetSupportsGlobalDatabases(v bool) *CreateCustomDBEngineVersionOutput { s.SupportsGlobalDatabases = &v @@ -25316,6 +25326,10 @@ type DBEngineVersion struct { // of the CreateDBInstance action. SupportedTimezones []*Timezone `locationNameList:"Timezone" type:"list"` + // A value that indicates whether the engine version supports Babelfish for + // Aurora PostgreSQL. + SupportsBabelfish *bool `type:"boolean"` + // A value that indicates whether you can use Aurora global databases with a // specific DB engine version. SupportsGlobalDatabases *bool `type:"boolean"` @@ -25472,6 +25486,12 @@ func (s *DBEngineVersion) SetSupportedTimezones(v []*Timezone) *DBEngineVersion return s } +// SetSupportsBabelfish sets the SupportsBabelfish field's value. 
+func (s *DBEngineVersion) SetSupportsBabelfish(v bool) *DBEngineVersion { + s.SupportsBabelfish = &v + return s +} + // SetSupportsGlobalDatabases sets the SupportsGlobalDatabases field's value. func (s *DBEngineVersion) SetSupportsGlobalDatabases(v bool) *DBEngineVersion { s.SupportsGlobalDatabases = &v @@ -28368,6 +28388,10 @@ type DeleteCustomDBEngineVersionOutput struct { // of the CreateDBInstance action. SupportedTimezones []*Timezone `locationNameList:"Timezone" type:"list"` + // A value that indicates whether the engine version supports Babelfish for + // Aurora PostgreSQL. + SupportsBabelfish *bool `type:"boolean"` + // A value that indicates whether you can use Aurora global databases with a // specific DB engine version. SupportsGlobalDatabases *bool `type:"boolean"` @@ -28524,6 +28548,12 @@ func (s *DeleteCustomDBEngineVersionOutput) SetSupportedTimezones(v []*Timezone) return s } +// SetSupportsBabelfish sets the SupportsBabelfish field's value. +func (s *DeleteCustomDBEngineVersionOutput) SetSupportsBabelfish(v bool) *DeleteCustomDBEngineVersionOutput { + s.SupportsBabelfish = &v + return s +} + // SetSupportsGlobalDatabases sets the SupportsGlobalDatabases field's value. func (s *DeleteCustomDBEngineVersionOutput) SetSupportsGlobalDatabases(v bool) *DeleteCustomDBEngineVersionOutput { s.SupportsGlobalDatabases = &v @@ -31442,18 +31472,18 @@ type DescribeDBClustersInput struct { // // Supported filters: // - // * clone-group-id - Accepts clone group identifiers. The results list will - // only include information about the DB clusters associated with these clone + // * clone-group-id - Accepts clone group identifiers. The results list only + // includes information about the DB clusters associated with these clone // groups. // // * db-cluster-id - Accepts DB cluster identifiers and DB cluster Amazon - // Resource Names (ARNs). The results list will only include information - // about the DB clusters identified by these ARNs. + // Resource Names (ARNs). The results list only includes information about + // the DB clusters identified by these ARNs. // - // * domain - Accepts Active Directory directory IDs. The results list will - // only include information about the DB clusters associated with these domains. + // * domain - Accepts Active Directory directory IDs. The results list only + // includes information about the DB clusters associated with these domains. // - // * engine - Accepts engine names. The results list will only include information + // * engine - Accepts engine names. The results list only includes information // about the DB clusters for these engines. Filters []*Filter `locationNameList:"Filter" type:"list"` @@ -31638,7 +31668,28 @@ type DescribeDBEngineVersionsInput struct { // Example: 5.1.49 EngineVersion *string `type:"string"` - // This parameter isn't currently supported. + // A filter that specifies one or more DB engine versions to describe. + // + // Supported filters: + // + // * db-parameter-group-family - Accepts parameter groups family names. The + // results list only includes information about the DB engine versions for + // these parameter group families. + // + // * engine - Accepts engine names. The results list only includes information + // about the DB engine versions for these engines. + // + // * engine-mode - Accepts DB engine modes. The results list only includes + // information about the DB engine versions for these engine modes. 
Valid + // DB engine modes are the following: global multimaster parallelquery provisioned + // serverless + // + // * engine-version - Accepts engine versions. The results list only includes + // information about the DB engine versions for these engine versions. + // + // * status - Accepts engine version statuses. The results list only includes + // information about the DB engine versions for these statuses. Valid statuses + // are the following: available deprecated Filters []*Filter `locationNameList:"Filter" type:"list"` // A value that indicates whether to include engine versions that aren't available @@ -32009,23 +32060,21 @@ type DescribeDBInstancesInput struct { // Supported filters: // // * db-cluster-id - Accepts DB cluster identifiers and DB cluster Amazon - // Resource Names (ARNs). The results list will only include information - // about the DB instances associated with the DB clusters identified by these - // ARNs. + // Resource Names (ARNs). The results list only includes information about + // the DB instances associated with the DB clusters identified by these ARNs. // // * db-instance-id - Accepts DB instance identifiers and DB instance Amazon - // Resource Names (ARNs). The results list will only include information - // about the DB instances identified by these ARNs. + // Resource Names (ARNs). The results list only includes information about + // the DB instances identified by these ARNs. // // * dbi-resource-id - Accepts DB instance resource identifiers. The results // list will only include information about the DB instances identified by // these DB instance resource identifiers. // - // * domain - Accepts Active Directory directory IDs. The results list will - // only include information about the DB instances associated with these - // domains. + // * domain - Accepts Active Directory directory IDs. The results list only + // includes information about the DB instances associated with these domains. // - // * engine - Accepts engine names. The results list will only include information + // * engine - Accepts engine names. The results list only includes information // about the DB instances for these engines. Filters []*Filter `locationNameList:"Filter" type:"list"` @@ -35496,12 +35545,12 @@ type DescribePendingMaintenanceActionsInput struct { // Supported filters: // // * db-cluster-id - Accepts DB cluster identifiers and DB cluster Amazon - // Resource Names (ARNs). The results list will only include pending maintenance + // Resource Names (ARNs). The results list only includes pending maintenance // actions for the DB clusters identified by these ARNs. // // * db-instance-id - Accepts DB instance identifiers and DB instance ARNs. - // The results list will only include pending maintenance actions for the - // DB instances identified by these ARNs. + // The results list only includes pending maintenance actions for the DB + // instances identified by these ARNs. Filters []*Filter `locationNameList:"Filter" type:"list"` // An optional pagination token provided by a previous DescribePendingMaintenanceActions @@ -38641,6 +38690,10 @@ type ModifyCustomDBEngineVersionOutput struct { // of the CreateDBInstance action. SupportedTimezones []*Timezone `locationNameList:"Timezone" type:"list"` + // A value that indicates whether the engine version supports Babelfish for + // Aurora PostgreSQL. + SupportsBabelfish *bool `type:"boolean"` + // A value that indicates whether you can use Aurora global databases with a // specific DB engine version. 
SupportsGlobalDatabases *bool `type:"boolean"` @@ -38797,6 +38850,12 @@ func (s *ModifyCustomDBEngineVersionOutput) SetSupportedTimezones(v []*Timezone) return s } +// SetSupportsBabelfish sets the SupportsBabelfish field's value. +func (s *ModifyCustomDBEngineVersionOutput) SetSupportsBabelfish(v bool) *ModifyCustomDBEngineVersionOutput { + s.SupportsBabelfish = &v + return s +} + // SetSupportsGlobalDatabases sets the SupportsGlobalDatabases field's value. func (s *ModifyCustomDBEngineVersionOutput) SetSupportsGlobalDatabases(v bool) *ModifyCustomDBEngineVersionOutput { s.SupportsGlobalDatabases = &v @@ -39155,8 +39214,8 @@ type ModifyDBClusterInput struct { // The name of the DB parameter group to apply to all instances of the DB cluster. // // When you apply a parameter group using the DBInstanceParameterGroupName parameter, - // the DB cluster isn't rebooted automatically. Also, parameter changes aren't - // applied during the next maintenance window but instead are applied immediately. + // the DB cluster isn't rebooted automatically. Also, parameter changes are + // applied immediately rather than during the next maintenance window. // // Default: The existing name setting // @@ -39165,8 +39224,8 @@ type ModifyDBClusterInput struct { // * The DB parameter group must be in the same DB parameter group family // as this DB cluster. // - // * The DBInstanceParameterGroupName parameter is only valid in combination - // with the AllowMajorVersionUpgrade parameter. + // * The DBInstanceParameterGroupName parameter is valid in combination with + // the AllowMajorVersionUpgrade parameter for a major version upgrade only. // // Valid for: Aurora DB clusters only DBInstanceParameterGroupName *string `type:"string"` @@ -46108,7 +46167,7 @@ type RestoreDBClusterFromSnapshotInput struct { // see DB Instance Class (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html) // in the Amazon RDS User Guide. // - // Valid for: Aurora DB clusters and Multi-AZ DB clusters + // Valid for: Multi-AZ DB clusters only DBClusterInstanceClass *string `type:"string"` // The name of the DB cluster parameter group to associate with this DB cluster. @@ -50943,6 +51002,10 @@ type UpgradeTarget struct { // A list of the supported DB engine modes for the target engine version. SupportedEngineModes []*string `type:"list"` + // A value that indicates whether you can use Babelfish for Aurora PostgreSQL + // with the target engine version. + SupportsBabelfish *bool `type:"boolean"` + // A value that indicates whether you can use Aurora global databases with the // target engine version. SupportsGlobalDatabases *bool `type:"boolean"` @@ -51006,6 +51069,12 @@ func (s *UpgradeTarget) SetSupportedEngineModes(v []*string) *UpgradeTarget { return s } +// SetSupportsBabelfish sets the SupportsBabelfish field's value. +func (s *UpgradeTarget) SetSupportsBabelfish(v bool) *UpgradeTarget { + s.SupportsBabelfish = &v + return s +} + // SetSupportsGlobalDatabases sets the SupportsGlobalDatabases field's value. func (s *UpgradeTarget) SetSupportsGlobalDatabases(v bool) *UpgradeTarget { s.SupportsGlobalDatabases = &v
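
The fields introduced in this release can be read straight off the generated clients. The following is a minimal, untested sketch, not part of the patch: it assumes credentials and a region are already configured in the environment, and the Athena query execution ID is a placeholder.

// Illustrative use of the v1.42.53 additions: AthenaError.ErrorType and
// DBEngineVersion.SupportsBabelfish (UpgradeTarget gains the same field).
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/athena"
	"github.com/aws/aws-sdk-go/service/rds"
)

func main() {
	sess := session.Must(session.NewSession())

	// Athena: when a query fails, GetQueryExecution now reports an ErrorType
	// alongside the existing ErrorCategory. The execution ID is a placeholder.
	athenaSvc := athena.New(sess)
	qe, err := athenaSvc.GetQueryExecution(&athena.GetQueryExecutionInput{
		QueryExecutionId: aws.String("example-query-execution-id"),
	})
	if err == nil {
		if ae := qe.QueryExecution.Status.AthenaError; ae != nil {
			fmt.Println("error category:", aws.Int64Value(ae.ErrorCategory),
				"error type:", aws.Int64Value(ae.ErrorType))
		}
	}

	// RDS: each Aurora PostgreSQL engine version now reports whether it
	// supports Babelfish.
	rdsSvc := rds.New(sess)
	evs, err := rdsSvc.DescribeDBEngineVersions(&rds.DescribeDBEngineVersionsInput{
		Engine: aws.String("aurora-postgresql"),
	})
	if err == nil {
		for _, v := range evs.DBEngineVersions {
			fmt.Println(aws.StringValue(v.EngineVersion),
				"Babelfish:", aws.BoolValue(v.SupportsBabelfish))
		}
	}
}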