From 63d7f8c539e0fd782fa5bf997bd6ac2730e4bfda Mon Sep 17 00:00:00 2001
From: AllanZhengYP Creates a new connector profile associated with your Amazon Web Services account. There is a soft quota
- * of 100 connector profiles per Amazon Web Services account. If you need more connector profiles than this quota
- * allows, you can submit a request to the Amazon AppFlow team through the Amazon AppFlow support
- * channel. Creates a new connector profile associated with your Amazon Web Services account. There
+ * is a soft quota of 100 connector profiles per Amazon Web Services account. If you need more
+ * connector profiles than this quota allows, you can submit a request to the Amazon AppFlow team
+ * through the Amazon AppFlow support channel. Describes the given custom connector registered in your Amazon Web Services account. This
+ * API can be used for custom connectors that are registered in your account and also for Amazon
+ * authored connectors. Provides details regarding the entity used with the connector, with a description of the
* data model for each entity. Returns the list of all registered custom connectors in your Amazon Web Services account.
+ * This API lists only custom connectors registered in this account, not the Amazon Web Services
+ * authored connectors. Lists all of the flows associated with your account. Registers a new connector with your Amazon Web Services account. Before you can register
+ * the connector, you must deploy lambda in your account. Activates an existing flow. For on-demand flows, this operation runs the flow
* immediately. For schedule and event-triggered flows, this operation activates the flow. Unregisters the custom connector registered in your account that matches the
+ * connectorLabel provided in the request. Removes a tag from the specified flow. Creates a new connector profile associated with your Amazon Web Services account. There is a soft quota
- * of 100 connector profiles per Amazon Web Services account. If you need more connector profiles than this quota
- * allows, you can submit a request to the Amazon AppFlow team through the Amazon AppFlow support
- * channel. Creates a new connector profile associated with your Amazon Web Services account. There
+ * is a soft quota of 100 connector profiles per Amazon Web Services account. If you need more
+ * connector profiles than this quota allows, you can submit a request to the Amazon AppFlow team
+ * through the Amazon AppFlow support channel. Describes the given custom connector registered in your Amazon Web Services account. This
+ * API can be used for custom connectors that are registered in your account and also for Amazon
+ * authored connectors. Returns the list of all registered custom connectors in your Amazon Web Services account.
+ * This API lists only custom connectors registered in this account, not the Amazon Web Services
+ * authored connectors. Registers a new connector with your Amazon Web Services account. Before you can register
+ * the connector, you must deploy lambda in your account. Unregisters the custom connector registered in your account that matches the
+ * connectorLabel provided in the request. AppFlow/Requester has invalid or missing permissions. The API key credentials required for API key authentication. The API key required for API key authentication. The API secret key required for API key authentication. Information about required authentication parameters. The authentication key required to authenticate with the connector. Indicates whether this authentication parameter is required. Label used for authentication parameter. A description about the authentication parameter. Indicates whether this authentication parameter is a sensitive field. Contains default values for this authentication parameter that are supplied by the
+ * connector. Configuration information required for custom authentication. The authentication type that the custom connector uses. Information about authentication parameters required for authentication. Contains the default values required for OAuth 2.0 authentication. OAuth 2.0 scopes that the connector supports. Token URLs that can be used for OAuth 2.0 authentication. Auth code URLs that can be used for OAuth 2.0 authentication. OAuth 2.0 grant types supported by the connector. Contains information about the authentication config that the connector supports. Indicates whether basic authentication is supported by the connector. Indicates whether API key authentication is supported by the connector Indicates whether OAuth 2.0 authentication is supported by the connector. Indicates whether custom authentication is supported by the connector Contains the default values required for OAuth 2.0 authentication. Contains information required for custom authentication. The basic auth credentials required for basic authentication. Contains information about the configuration of the lambda which is being registered as
+ * the connector. Lambda ARN of the connector being registered. Contains information about the configuration of the connector being registered. Contains information about the configuration of the lambda which is being registered as
+ * the connector. Contains information about the connector runtime settings that are required for flow
+ * execution. Contains value information about the connector runtime setting. Data type of the connector runtime setting. Indicates whether this connector runtime setting is required. A label used for connector runtime setting. A description about the connector runtime setting. Indicates the scope of the connector runtime setting. Contains default values for the connector runtime setting that are supplied by the
+ * connector. The configuration settings related to a given connector. supportedRegions
, privateLinkServiceUrl
, and so on.
The connector type.
+ */ + connectorType?: ConnectorType | string; + + /** + *The label used for registering the connector.
+ */ + connectorLabel?: string; + + /** + *A description about the connector.
+ */ + connectorDescription?: string; + + /** + *The owner who developed the connector.
+ */ + connectorOwner?: string; + + /** + *The connector name.
+ */ + connectorName?: string; + + /** + *The connector version.
+ */ + connectorVersion?: string; + + /** + *The Amazon Resource Name (ARN) for the registered connector.
+ */ + connectorArn?: string; + + /** + *The connection modes that the connector supports.
+ */ + connectorModes?: string[]; + + /** + *The authentication config required for the connector.
+ */ + authenticationConfig?: AuthenticationConfig; + + /** + *The required connector runtime settings.
+ */ + connectorRuntimeSettings?: ConnectorRuntimeSetting[]; + + /** + *A list of API versions that are supported by the connector.
+ */ + supportedApiVersions?: string[]; + + /** + *A list of operators supported by the connector.
+ */ + supportedOperators?: (Operators | string)[]; + + /** + *A list of write operations supported by the connector.
+ */ + supportedWriteOperations?: (WriteOperationType | string)[]; + + /** + *The provisioning type used to register the connector.
+ */ + connectorProvisioningType?: ConnectorProvisioningType | string; + + /** + *The configuration required for registering the connector.
+ */ + connectorProvisioningConfig?: ConnectorProvisioningConfig; + + /** + *Logo URL of the connector.
+ */ + logoURL?: string; + + /** + *The date on which the connector was registered.
+ */ + registeredAt?: Date; + + /** + *Information about who registered the connector.
+ */ + registeredBy?: string; } export namespace ConnectorConfiguration { @@ -678,6 +1090,75 @@ export namespace ConnectorConfiguration { }); } +/** + *Information about the registered connector.
+ */ +export interface ConnectorDetail { + /** + *A description about the registered connector.
+ */ + connectorDescription?: string; + + /** + *The name of the connector.
+ */ + connectorName?: string; + + /** + *The owner of the connector.
+ */ + connectorOwner?: string; + + /** + *The connector version.
+ */ + connectorVersion?: string; + + /** + *The application type of the connector.
+ */ + applicationType?: string; + + /** + *The connector type.
+ */ + connectorType?: ConnectorType | string; + + /** + *A label used for the connector.
+ */ + connectorLabel?: string; + + /** + *The time at which the connector was registered.
+ */ + registeredAt?: Date; + + /** + *The user who registered the connector.
+ */ + registeredBy?: string; + + /** + *The provisioning type that the connector uses.
+ */ + connectorProvisioningType?: ConnectorProvisioningType | string; + + /** + *The connection mode that the connector supports.
+ */ + connectorModes?: string[]; +} + +export namespace ConnectorDetail { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ConnectorDetail): any => ({ + ...obj, + }); +} + /** *The high-level entity that can be queried in Amazon AppFlow. For example, a Salesforce * entity might be an Account or Opportunity, whereas a @@ -713,12 +1194,6 @@ export namespace ConnectorEntity { }); } -export enum WriteOperationType { - INSERT = "INSERT", - UPDATE = "UPDATE", - UPSERT = "UPSERT", -} - /** *
The properties that can be applied to a field when connector is being used as a * destination.
@@ -746,6 +1221,11 @@ export interface DestinationFieldProperties { */ isUpdatable?: boolean; + /** + *Specifies whether the field can use the default value during a Create operation.
+ */ + isDefaultedOnCreate?: boolean; + /** * A list of supported write operations. For each write operation listed, this field can be
* used in idFieldNames
when that write operation is present as a destination
@@ -777,6 +1257,11 @@ export interface SourceFieldProperties {
*
Indicates if the field can be queried.
*/ isQueryable?: boolean; + + /** + *Indicates if this timestamp field can be used for incremental queries.
+ */ + isTimestampFieldForIncrementalQueries?: boolean; } export namespace SourceFieldProperties { @@ -788,6 +1273,30 @@ export namespace SourceFieldProperties { }); } +/** + *The range of values that the property supports.
+ */ +export interface Range { + /** + *Maximum value supported by the field.
+ */ + maximum?: number; + + /** + *Minimum value supported by the field.
+ */ + minimum?: number; +} + +export namespace Range { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Range): any => ({ + ...obj, + }); +} + export enum Operator { ADDITION = "ADDITION", BETWEEN = "BETWEEN", @@ -832,6 +1341,26 @@ export interface FieldTypeDetails { *fieldType
can have two values: "true" and "false".
*/
supportedValues?: string[];
+
+ /**
+ * The regular expression pattern for the field name.
+ */ + valueRegexPattern?: string; + + /** + *The date format that the field supports.
+ */ + supportedDateFormat?: string; + + /** + *The range of values this field can hold.
+ */ + fieldValueRange?: Range; + + /** + *This is the allowable length range for this field's value.
+ */ + fieldLengthRange?: Range; } export namespace FieldTypeDetails { @@ -875,11 +1404,31 @@ export interface ConnectorEntityField { */ identifier: string | undefined; + /** + *The parent identifier of the connector field.
+ */ + parentIdentifier?: string; + /** *The label applied to a connector entity field.
*/ label?: string; + /** + *Booelan value that indicates whether this field can be used as a primary key.
+ */ + isPrimaryKey?: boolean; + + /** + *Default value that can be assigned to this field.
+ */ + defaultValue?: string; + + /** + *Booelan value that indicates whether this field is deprecated or not.
+ */ + isDeprecated?: boolean; + /** * Contains details regarding the supported FieldType
, including the
* corresponding filterOperators
and supportedValues
.
A map that has specific properties related to the ConnectorEntityField.
+ */ + customProperties?: { [key: string]: string }; } export namespace ConnectorEntityField { @@ -1288,6 +1842,11 @@ export interface ConnectorOperator { *The operation to be performed on the provided SAPOData source fields.
*/ SAPOData?: SAPODataConnectorOperator | string; + + /** + *Operators supported by the custom connector.
+ */ + CustomConnector?: Operator | string; } export namespace ConnectorOperator { @@ -1299,6 +1858,54 @@ export namespace ConnectorOperator { }); } +/** + *The OAuth 2.0 properties required for OAuth 2.0 authentication.
+ */ +export interface OAuth2Properties { + /** + *The token URL required for OAuth 2.0 authentication.
+ */ + tokenUrl: string | undefined; + + /** + *The OAuth 2.0 grant type used by connector for OAuth 2.0 authentication.
+ */ + oAuth2GrantType: OAuth2GrantType | string | undefined; +} + +export namespace OAuth2Properties { + /** + * @internal + */ + export const filterSensitiveLog = (obj: OAuth2Properties): any => ({ + ...obj, + }); +} + +/** + *The profile properties required by the custom connector.
+ */ +export interface CustomConnectorProfileProperties { + /** + *A map of properties that are required to create a profile for the custom connector.
+ */ + profileProperties?: { [key: string]: string }; + + /** + *The OAuth 2.0 properties required for OAuth 2.0 authentication.
+ */ + oAuth2Properties?: OAuth2Properties; +} + +export namespace CustomConnectorProfileProperties { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CustomConnectorProfileProperties): any => ({ + ...obj, + }); +} + /** *The connector-specific profile properties required by Datadog.
*/ @@ -1468,14 +2075,14 @@ export namespace SalesforceConnectorProfileProperties { */ export interface OAuthProperties { /** - *The token url required to fetch access/refresh tokens using authorization code and also to refresh expired - * access token using refresh token.
+ *The token url required to fetch access/refresh tokens using authorization code and also + * to refresh expired access token using refresh token.
*/ tokenUrl: string | undefined; /** - *The authorization code url required to redirect to SAP Login Page to fetch authorization code for OAuth type - * authentication.
+ *The authorization code url required to redirect to SAP Login Page to fetch authorization + * code for OAuth type authentication.
*/ authCodeUrl: string | undefined; @@ -1786,6 +2393,11 @@ export interface ConnectorProfileProperties { *The connector-specific profile properties required when using SAPOData.
*/ SAPOData?: SAPODataConnectorProfileProperties; + + /** + *The properties required by the custom connector.
+ */ + CustomConnector?: CustomConnectorProfileProperties; } export namespace ConnectorProfileProperties { @@ -1864,6 +2476,11 @@ export interface ConnectorProfile { */ connectorType?: ConnectorType | string; + /** + *The label for the connector profile being created.
+ */ + connectorLabel?: string; + /** *Indicates the connection mode and if it is public or private.
*/ @@ -1895,12 +2512,126 @@ export interface ConnectorProfile { privateConnectionProvisioningState?: PrivateConnectionProvisioningState; } -export namespace ConnectorProfile { +export namespace ConnectorProfile { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ConnectorProfile): any => ({ + ...obj, + }); +} + +/** + *The custom credentials required for custom authentication.
+ */ +export interface CustomAuthCredentials { + /** + *The custom authentication type that the connector uses.
+ */ + customAuthenticationType: string | undefined; + + /** + *A map that holds custom authentication credentials.
+ */ + credentialsMap?: { [key: string]: string }; +} + +export namespace CustomAuthCredentials { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CustomAuthCredentials): any => ({ + ...obj, + ...(obj.credentialsMap && { credentialsMap: SENSITIVE_STRING }), + }); +} + +/** + *The OAuth 2.0 credentials required for OAuth 2.0 authentication.
+ */ +export interface OAuth2Credentials { + /** + *The identifier for the desired client.
+ */ + clientId?: string; + + /** + *The client secret used by the OAuth client to authenticate to the authorization + * server.
+ */ + clientSecret?: string; + + /** + *The access token used to access the connector on your behalf.
+ */ + accessToken?: string; + + /** + *The refresh token used to refresh an expired access token.
+ */ + refreshToken?: string; + + /** + *Used by select connectors for which the OAuth workflow is supported, such as Salesforce, + * Google Analytics, Marketo, Zendesk, and Slack.
+ */ + oAuthRequest?: ConnectorOAuthRequest; +} + +export namespace OAuth2Credentials { + /** + * @internal + */ + export const filterSensitiveLog = (obj: OAuth2Credentials): any => ({ + ...obj, + ...(obj.clientSecret && { clientSecret: SENSITIVE_STRING }), + ...(obj.accessToken && { accessToken: SENSITIVE_STRING }), + }); +} + +/** + *The connector-specific profile credentials that are required when using the custom + * connector.
+ */ +export interface CustomConnectorProfileCredentials { + /** + *The authentication type that the custom connector uses for authenticating while creating a + * connector profile.
+ */ + authenticationType: AuthenticationType | string | undefined; + + /** + *The basic credentials that are required for the authentication of the user.
+ */ + basic?: BasicAuthCredentials; + + /** + *The OAuth 2.0 credentials required for the authentication of the user.
+ */ + oauth2?: OAuth2Credentials; + + /** + *The API keys required for the authentication of the user.
+ */ + apiKey?: ApiKeyCredentials; + + /** + *If the connector uses the custom authentication mechanism, this holds the required + * credentials.
+ */ + custom?: CustomAuthCredentials; +} + +export namespace CustomConnectorProfileCredentials { /** * @internal */ - export const filterSensitiveLog = (obj: ConnectorProfile): any => ({ + export const filterSensitiveLog = (obj: CustomConnectorProfileCredentials): any => ({ ...obj, + ...(obj.basic && { basic: BasicAuthCredentials.filterSensitiveLog(obj.basic) }), + ...(obj.oauth2 && { oauth2: OAuth2Credentials.filterSensitiveLog(obj.oauth2) }), + ...(obj.apiKey && { apiKey: ApiKeyCredentials.filterSensitiveLog(obj.apiKey) }), + ...(obj.custom && { custom: CustomAuthCredentials.filterSensitiveLog(obj.custom) }), }); } @@ -1928,6 +2659,7 @@ export namespace DatadogConnectorProfileCredentials { */ export const filterSensitiveLog = (obj: DatadogConnectorProfileCredentials): any => ({ ...obj, + ...(obj.apiKey && { apiKey: SENSITIVE_STRING }), }); } @@ -2274,6 +3006,7 @@ export namespace SingularConnectorProfileCredentials { */ export const filterSensitiveLog = (obj: SingularConnectorProfileCredentials): any => ({ ...obj, + ...(obj.apiKey && { apiKey: SENSITIVE_STRING }), }); } @@ -2511,6 +3244,12 @@ export interface ConnectorProfileCredentials { *The connector-specific profile credentials required when using SAPOData.
*/ SAPOData?: SAPODataConnectorProfileCredentials; + + /** + *The connector-specific profile credentials that are required when using the custom + * connector.
+ */ + CustomConnector?: CustomConnectorProfileCredentials; } export namespace ConnectorProfileCredentials { @@ -2520,6 +3259,7 @@ export namespace ConnectorProfileCredentials { export const filterSensitiveLog = (obj: ConnectorProfileCredentials): any => ({ ...obj, ...(obj.Amplitude && { Amplitude: AmplitudeConnectorProfileCredentials.filterSensitiveLog(obj.Amplitude) }), + ...(obj.Datadog && { Datadog: DatadogConnectorProfileCredentials.filterSensitiveLog(obj.Datadog) }), ...(obj.GoogleAnalytics && { GoogleAnalytics: GoogleAnalyticsConnectorProfileCredentials.filterSensitiveLog(obj.GoogleAnalytics), }), @@ -2529,12 +3269,16 @@ export namespace ConnectorProfileCredentials { ...(obj.Redshift && { Redshift: RedshiftConnectorProfileCredentials.filterSensitiveLog(obj.Redshift) }), ...(obj.Salesforce && { Salesforce: SalesforceConnectorProfileCredentials.filterSensitiveLog(obj.Salesforce) }), ...(obj.ServiceNow && { ServiceNow: ServiceNowConnectorProfileCredentials.filterSensitiveLog(obj.ServiceNow) }), + ...(obj.Singular && { Singular: SingularConnectorProfileCredentials.filterSensitiveLog(obj.Singular) }), ...(obj.Slack && { Slack: SlackConnectorProfileCredentials.filterSensitiveLog(obj.Slack) }), ...(obj.Snowflake && { Snowflake: SnowflakeConnectorProfileCredentials.filterSensitiveLog(obj.Snowflake) }), ...(obj.Trendmicro && { Trendmicro: TrendmicroConnectorProfileCredentials.filterSensitiveLog(obj.Trendmicro) }), ...(obj.Veeva && { Veeva: VeevaConnectorProfileCredentials.filterSensitiveLog(obj.Veeva) }), ...(obj.Zendesk && { Zendesk: ZendeskConnectorProfileCredentials.filterSensitiveLog(obj.Zendesk) }), ...(obj.SAPOData && { SAPOData: SAPODataConnectorProfileCredentials.filterSensitiveLog(obj.SAPOData) }), + ...(obj.CustomConnector && { + CustomConnector: CustomConnectorProfileCredentials.filterSensitiveLog(obj.CustomConnector), + }), }); } @@ -2595,10 +3339,17 @@ export interface CreateConnectorProfileRequest { */ connectorType: ConnectorType | string | undefined; 
+ /** + *The label of the connector. The label is unique for each
+ * ConnectorRegistration
in your Amazon Web Services account. Only needed if
+ * calling for CUSTOMCONNECTOR connector type/.
Indicates the connection mode and specifies whether it is public or private. Private - * flows use Amazon Web Services PrivateLink to route data over Amazon Web Services infrastructure without exposing it to the - * public internet.
+ * flows use Amazon Web Services PrivateLink to route data over Amazon Web Services infrastructure + * without exposing it to the public internet. */ connectionMode: ConnectionMode | string | undefined; @@ -2663,60 +3414,104 @@ export interface ValidationException extends __SmithyException, $MetadataBearer } /** - *The properties that are applied when Amazon Connect Customer Profiles is used as a - * destination.
+ * The settings that determine how Amazon AppFlow handles an error when placing data in the
+ * destination. For example, this setting would determine if the flow should fail after one
+ * insertion error, or continue and attempt to insert every record regardless of the initial
+ * failure. ErrorHandlingConfig
is a part of the destination connector details.
+ *
The unique name of the Amazon Connect Customer Profiles domain.
+ *Specifies if the flow should fail after the first instance of a failure when attempting + * to place data in the destination.
*/ - domainName: string | undefined; + failOnFirstDestinationError?: boolean; /** - *The object specified in the Amazon Connect Customer Profiles flow destination.
+ *Specifies the Amazon S3 bucket prefix.
*/ - objectTypeName?: string; + bucketPrefix?: string; + + /** + *Specifies the name of the Amazon S3 bucket.
+ */ + bucketName?: string; } -export namespace CustomerProfilesDestinationProperties { +export namespace ErrorHandlingConfig { /** * @internal */ - export const filterSensitiveLog = (obj: CustomerProfilesDestinationProperties): any => ({ + export const filterSensitiveLog = (obj: ErrorHandlingConfig): any => ({ ...obj, }); } /** - * The settings that determine how Amazon AppFlow handles an error when placing data in the
- * destination. For example, this setting would determine if the flow should fail after one
- * insertion error, or continue and attempt to insert every record regardless of the initial
- * failure. ErrorHandlingConfig
is a part of the destination connector details.
- *
The properties that are applied when the custom connector is being used as a + * destination.
*/ -export interface ErrorHandlingConfig { +export interface CustomConnectorDestinationProperties { /** - *Specifies if the flow should fail after the first instance of a failure when attempting - * to place data in the destination.
+ *The entity specified in the custom connector as a destination in the flow.
*/ - failOnFirstDestinationError?: boolean; + entityName: string | undefined; /** - *Specifies the Amazon S3 bucket prefix.
+ *The settings that determine how Amazon AppFlow handles an error when placing data in the + * custom connector as destination.
*/ - bucketPrefix?: string; + errorHandlingConfig?: ErrorHandlingConfig; /** - *Specifies the name of the Amazon S3 bucket.
+ *Specifies the type of write operation to be performed in the custom connector when it's + * used as destination.
*/ - bucketName?: string; + writeOperationType?: WriteOperationType | string; + + /** + *The name of the field that Amazon AppFlow uses as an ID when performing a write operation + * such as update, delete, or upsert.
+ */ + idFieldNames?: string[]; + + /** + *The custom properties that are specific to the connector when it's used as a destination + * in the flow.
+ */ + customProperties?: { [key: string]: string }; } -export namespace ErrorHandlingConfig { +export namespace CustomConnectorDestinationProperties { /** * @internal */ - export const filterSensitiveLog = (obj: ErrorHandlingConfig): any => ({ + export const filterSensitiveLog = (obj: CustomConnectorDestinationProperties): any => ({ + ...obj, + }); +} + +/** + *The properties that are applied when Amazon Connect Customer Profiles is used as a + * destination.
+ */ +export interface CustomerProfilesDestinationProperties { + /** + *The unique name of the Amazon Connect Customer Profiles domain.
+ */ + domainName: string | undefined; + + /** + *The object specified in the Amazon Connect Customer Profiles flow destination.
+ */ + objectTypeName?: string; +} + +export namespace CustomerProfilesDestinationProperties { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CustomerProfilesDestinationProperties): any => ({ ...obj, }); } @@ -3180,6 +3975,11 @@ export interface DestinationConnectorProperties { *The properties required to query Zendesk.
*/ Zendesk?: ZendeskDestinationProperties; + + /** + *The properties that are required to query the custom Connector.
+ */ + CustomConnector?: CustomConnectorDestinationProperties; } export namespace DestinationConnectorProperties { @@ -3201,6 +4001,11 @@ export interface DestinationFlowConfig { */ connectorType: ConnectorType | string | undefined; + /** + *The API version that the destination connector uses.
+ */ + apiVersion?: string; + /** *The name of the connector profile. This name must be unique for each connector profile in * the Amazon Web Services account.
@@ -3243,6 +4048,31 @@ export namespace IncrementalPullConfig { }); } +/** + *The properties that are applied when the custom connector is being used as a + * source.
+ */ +export interface CustomConnectorSourceProperties { + /** + *The entity specified in the custom connector as a source in the flow.
+ */ + entityName: string | undefined; + + /** + *Custom properties that are required to use the custom connector as a source.
+ */ + customProperties?: { [key: string]: string }; +} + +export namespace CustomConnectorSourceProperties { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CustomConnectorSourceProperties): any => ({ + ...obj, + }); +} + /** *The properties that are applied when Datadog is being used as a source.
*/ @@ -3652,6 +4482,12 @@ export interface SourceConnectorProperties { *The properties that are applied when using SAPOData as a flow source.
*/ SAPOData?: SAPODataSourceProperties; + + /** + *The properties that are applied when the custom connector is being used as a + * source.
+ */ + CustomConnector?: CustomConnectorSourceProperties; } export namespace SourceConnectorProperties { @@ -3673,6 +4509,11 @@ export interface SourceFlowConfig { */ connectorType: ConnectorType | string | undefined; + /** + *The API version of the connector when it's used as a source in the flow.
+ */ + apiVersion?: string; + /** *The name of the connector profile. This name must be unique for each connector profile in * the Amazon Web Services account.
@@ -4047,6 +4888,46 @@ export namespace DeleteFlowResponse { }); } +export interface DescribeConnectorRequest { + /** + *The connector type, such as CUSTOMCONNECTOR, Saleforce, Marketo. Please choose + * CUSTOMCONNECTOR for Lambda based custom connectors.
+ */ + connectorType: ConnectorType | string | undefined; + + /** + *The label of the connector. The label is unique for each
+ * ConnectorRegistration
in your Amazon Web Services account. Only needed if
+ * calling for CUSTOMCONNECTOR connector type/.
Configuration info of all the connectors that the user requested.
+ */ + connectorConfiguration?: ConnectorConfiguration; +} + +export namespace DescribeConnectorResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DescribeConnectorResponse): any => ({ + ...obj, + }); +} + export interface DescribeConnectorEntityRequest { /** *The entity name for that connector.
@@ -4063,6 +4944,11 @@ export interface DescribeConnectorEntityRequest { *ConnectorProfile
in the Amazon Web Services account.
*/
connectorProfileName?: string;
+
+ /**
+ * The version of the API that's used by the connector.
+ */ + apiVersion?: string; } export namespace DescribeConnectorEntityRequest { @@ -4104,6 +4990,13 @@ export interface DescribeConnectorProfilesRequest { */ connectorType?: ConnectorType | string; + /** + *The name of the connector. The name is unique for each ConnectorRegistration
+ * in your Amazon Web Services account. Only needed if calling for CUSTOMCONNECTOR connector
+ * type/.
Specifies the maximum number of items that should be returned in the result set. The
* default for maxResults
is 20 (for all paginated API operations).
The maximum number of items that should be returned in the result set. The default is + * 20.
+ */ + maxResults?: number; + /** *The pagination token for the next page of data.
*/ @@ -4174,6 +5073,11 @@ export interface DescribeConnectorsResponse { */ connectorConfigurations?: { [key: string]: ConnectorConfiguration }; + /** + *Information about the connectors supported in Amazon AppFlow.
+ */ + connectors?: ConnectorDetail[]; + /** *The pagination token for the next page of data.
*/ @@ -4533,12 +5437,22 @@ export interface FlowDefinition { */ sourceConnectorType?: ConnectorType | string; + /** + *The label of the source connector in the flow.
+ */ + sourceConnectorLabel?: string; + /** *Specifies the destination connector type, such as Salesforce, Amazon S3, Amplitude, and * so on.
*/ destinationConnectorType?: ConnectorType | string; + /** + *The label of the destination connector in the flow.
+ */ + destinationConnectorLabel?: string; + /** * Specifies the type of flow trigger. This can be OnDemand
,
* Scheduled
, or Event
.
The name of the connector profile. The name is unique for each
- * ConnectorProfile
in the Amazon Web Services account, and is used to query the downstream
- * connector.
ConnectorProfile
in the Amazon Web Services account, and is used to query the
+ * downstream connector.
*/
connectorProfileName?: string;
@@ -4606,6 +5520,11 @@ export interface ListConnectorEntitiesRequest {
* roots. Otherwise, this request returns all entities supported by the provider.
*/
entitiesPath?: string;
+
+ /**
+ * The version of the API that's used by the connector.
+ */ + apiVersion?: string; } export namespace ListConnectorEntitiesRequest { @@ -4635,6 +5554,50 @@ export namespace ListConnectorEntitiesResponse { }); } +export interface ListConnectorsRequest { + /** + *Specifies the maximum number of items that should be returned in the result set. The
+ * default for maxResults
is 20 (for all paginated API operations).
The pagination token for the next page of data.
+ */ + nextToken?: string; +} + +export namespace ListConnectorsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListConnectorsRequest): any => ({ + ...obj, + }); +} + +export interface ListConnectorsResponse { + /** + *Contains information about the connectors supported by Amazon AppFlow.
+ */ + connectors?: ConnectorDetail[]; + + /** + *The pagination token for the next page of data. If nextToken=null, this means that all + * records have been fetched.
+ */ + nextToken?: string; +} + +export namespace ListConnectorsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListConnectorsResponse): any => ({ + ...obj, + }); +} + export interface ListFlowsRequest { /** *Specifies the maximum number of items that should be returned in the result set.
@@ -4709,6 +5672,66 @@ export namespace ListTagsForResourceResponse { }); } +export interface RegisterConnectorRequest { + /** + * The name of the connector. The name is unique for each ConnectorRegistration
+ * in your Amazon Web Services account.
A description about the connector that's being registered.
+ */ + description?: string; + + /** + *The provisioning type of the connector. Currently the only supported value is LAMBDA. + *
+ */ + connectorProvisioningType?: ConnectorProvisioningType | string; + + /** + *The provisioning type of the connector. Currently the only supported value is + * LAMBDA.
+ */ + connectorProvisioningConfig?: ConnectorProvisioningConfig; +} + +export namespace RegisterConnectorRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: RegisterConnectorRequest): any => ({ + ...obj, + }); +} + +export interface RegisterConnectorResponse { + /** + *The ARN of the connector being registered.
+ */ + connectorArn?: string; +} + +export namespace RegisterConnectorResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: RegisterConnectorResponse): any => ({ + ...obj, + }); +} + +/** + *API calls have exceeded the maximum allowed API request rate per account and per Region. + *
+ */ +export interface ThrottlingException extends __SmithyException, $MetadataBearer { + name: "ThrottlingException"; + $fault: "client"; + message?: string; +} + export interface StartFlowRequest { /** *The specified name of the flow. Spaces are not allowed. Use underscores (_) or hyphens @@ -4832,6 +5855,40 @@ export namespace TagResourceResponse { }); } +export interface UnregisterConnectorRequest { + /** + *
The label of the connector. The label is unique for each
+ * ConnectorRegistration
in your Amazon Web Services account.
Indicates whether Amazon AppFlow should unregister the connector, even if it is currently + * in use in one or more connector profiles. The default value is false.
+ */ + forceDelete?: boolean; +} + +export namespace UnregisterConnectorRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UnregisterConnectorRequest): any => ({ + ...obj, + }); +} + +export interface UnregisterConnectorResponse {} + +export namespace UnregisterConnectorResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UnregisterConnectorResponse): any => ({ + ...obj, + }); +} + export interface UntagResourceRequest { /** *The Amazon Resource Name (ARN) of the flow that you want to untag.
diff --git a/clients/client-appflow/src/pagination/DescribeConnectorsPaginator.ts b/clients/client-appflow/src/pagination/DescribeConnectorsPaginator.ts index 3fbad393ab371..6b75aaab8722e 100644 --- a/clients/client-appflow/src/pagination/DescribeConnectorsPaginator.ts +++ b/clients/client-appflow/src/pagination/DescribeConnectorsPaginator.ts @@ -42,6 +42,7 @@ export async function* paginateDescribeConnectors( let page: DescribeConnectorsCommandOutput; while (hasNext) { input.nextToken = token; + input["maxResults"] = config.pageSize; if (config.client instanceof Appflow) { page = await makePagedRequest(config.client, input, ...additionalArguments); } else if (config.client instanceof AppflowClient) { diff --git a/clients/client-appflow/src/pagination/ListConnectorsPaginator.ts b/clients/client-appflow/src/pagination/ListConnectorsPaginator.ts new file mode 100644 index 0000000000000..7cbe7dd3d1a96 --- /dev/null +++ b/clients/client-appflow/src/pagination/ListConnectorsPaginator.ts @@ -0,0 +1,59 @@ +import { Paginator } from "@aws-sdk/types"; + +import { Appflow } from "../Appflow"; +import { AppflowClient } from "../AppflowClient"; +import { + ListConnectorsCommand, + ListConnectorsCommandInput, + ListConnectorsCommandOutput, +} from "../commands/ListConnectorsCommand"; +import { AppflowPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: AppflowClient, + input: ListConnectorsCommandInput, + ...args: any +): PromiseQueryExecutionId
from the Athena query results location in
* Amazon S3. For more information, see Query Results in the Amazon Athena User Guide. This request does not execute the query
* but returns results. Use StartQueryExecution to run a query.
+ * If the original query execution ran using an ResultConfiguration$ExpectedBucketOwner setting, the setting also
+ * applies to Amazon S3 read operations when GetQueryResults
is
+ * called. If an expected bucket owner has been specified and the query results are in an
+ * Amazon S3 bucket whose owner account ID is different from the expected
+ * bucket owner, the GetQueryResults
call fails with an Amazon S3
+ * permissions error.
To stream query results successfully, the IAM principal with permission to call
* GetQueryResults
also must have permissions to the Amazon S3
* GetObject
action for the Athena query results location.
QueryExecutionId
from the Athena query results location in
* Amazon S3. For more information, see Query Results in the Amazon Athena User Guide. This request does not execute the query
* but returns results. Use StartQueryExecution to run a query.
+ * If the original query execution ran using an ResultConfiguration$ExpectedBucketOwner setting, the setting also
+ * applies to Amazon S3 read operations when GetQueryResults
is
+ * called. If an expected bucket owner has been specified and the query results are in an
+ * Amazon S3 bucket whose owner account ID is different from the expected
+ * bucket owner, the GetQueryResults
call fails with an Amazon S3
+ * permissions error.
To stream query results successfully, the IAM principal with permission to call
* GetQueryResults
also must have permissions to the Amazon S3
* GetObject
action for the Athena query results location.
The Amazon Web Services account ID that you expect to be the owner of the Amazon S3 bucket specified by ResultConfiguration$OutputLocation.
+ * If set, Athena uses the value for ExpectedBucketOwner
when it
+ * makes Amazon S3 calls to your specified output location. If the
+ * ExpectedBucketOwner
+ * Amazon Web Services account ID does not match the actual owner of the Amazon S3
+ * bucket, the call fails with a permissions error.
This is a client-side setting. If workgroup settings override client-side settings,
+ * then the query uses the ExpectedBucketOwner
setting that is specified for
+ * the workgroup, and also uses the location for storing query results specified in the
+ * workgroup. See WorkGroupConfiguration$EnforceWorkGroupConfiguration
+ * and Workgroup Settings Override Client-Side Settings.
OutputLocation
in ResultConfigurationUpdates
(the
* client-side setting), the OutputLocation
in the workgroup's
- * ResultConfiguration
will be updated with the new value. For more
+ * ResultConfiguration
is updated with the new value. For more
* information, see Workgroup Settings Override
* Client-Side Settings.
*/
@@ -2744,11 +2759,36 @@ export interface ResultConfigurationUpdates {
* and set to null. If set to "false" or not set, and a value is present in the
* EncryptionConfiguration
in ResultConfigurationUpdates
(the
* client-side setting), the EncryptionConfiguration
in the workgroup's
- * ResultConfiguration
will be updated with the new value. For more
+ * ResultConfiguration
is updated with the new value. For more
* information, see Workgroup Settings Override
* Client-Side Settings.
*/
RemoveEncryptionConfiguration?: boolean;
+
+ /**
+ * The Amazon Web Services account ID that you expect to be the owner of the Amazon S3 bucket specified by ResultConfiguration$OutputLocation.
+ * If set, Athena uses the value for ExpectedBucketOwner
when it
+ * makes Amazon S3 calls to your specified output location. If the
+ * ExpectedBucketOwner
+ * Amazon Web Services account ID does not match the actual owner of the Amazon S3
+ * bucket, the call fails with a permissions error.
If workgroup settings override client-side settings, then the query uses the
+ * ExpectedBucketOwner
setting that is specified for the workgroup, and
+ * also uses the location for storing query results specified in the workgroup. See WorkGroupConfiguration$EnforceWorkGroupConfiguration and Workgroup Settings Override Client-Side Settings.
If set to "true", removes the Amazon Web Services account ID previously specified for
+ * ResultConfiguration$ExpectedBucketOwner. If set to "false" or not
+ * set, and a value is present in the ExpectedBucketOwner
in
+ * ResultConfigurationUpdates
(the client-side setting), the
+ * ExpectedBucketOwner
in the workgroup's ResultConfiguration
+ * is updated with the new value. For more information, see Workgroup Settings Override
+ * Client-Side Settings.
Creates a model-specific endpoint for synchronous inference for a previously trained - * custom model - *
+ * custom model */ public createEndpoint( args: CreateEndpointCommandInput, @@ -766,6 +781,38 @@ export class Comprehend extends ComprehendClient { } } + /** + *Deletes a resource-based policy that is attached to a custom model.
+ */ + public deleteResourcePolicy( + args: DeleteResourcePolicyCommandInput, + options?: __HttpHandlerOptions + ): PromiseGets the properties associated with a document classification job. Use this operation to * get the status of a classification job.
@@ -1061,6 +1108,39 @@ export class Comprehend extends ComprehendClient { } } + /** + *Gets the details of a resource-based policy that is attached to a custom model, including + * the JSON body of the policy.
+ */ + public describeResourcePolicy( + args: DescribeResourcePolicyCommandInput, + options?: __HttpHandlerOptions + ): PromiseGets the properties associated with a sentiment detection job. Use this operation to get * the status of a detection job.
@@ -1321,6 +1401,37 @@ export class Comprehend extends ComprehendClient { } } + /** + *Creates a new custom model that replicates a source custom model that you import. The + * source model can be in your AWS account or another one.
+ *If the source model is in another AWS account, then it must have a resource-based policy + * that authorizes you to import it.
+ *The source model must be in the same AWS region that you're using when you import. You + * can't import a model that's in a different region.
+ */ + public importModel(args: ImportModelCommandInput, options?: __HttpHandlerOptions): PromiseGets a list of the documentation classification jobs that you have submitted.
*/ @@ -1774,6 +1885,40 @@ export class Comprehend extends ComprehendClient { } } + /** + *Attaches a resource-based policy to a custom model. You can use this policy to authorize + * an entity in another AWS account to import the custom model, which replicates it in Amazon + * Comprehend in their account.
+ */ + public putResourcePolicy( + args: PutResourcePolicyCommandInput, + options?: __HttpHandlerOptions + ): PromiseStarts an asynchronous document classification job. Use the operation to track the progress of the * job.
diff --git a/clients/client-comprehend/src/ComprehendClient.ts b/clients/client-comprehend/src/ComprehendClient.ts index a3d9e876f472f..46d8ca4db2c8f 100644 --- a/clients/client-comprehend/src/ComprehendClient.ts +++ b/clients/client-comprehend/src/ComprehendClient.ts @@ -90,6 +90,10 @@ import { DeleteEntityRecognizerCommandInput, DeleteEntityRecognizerCommandOutput, } from "./commands/DeleteEntityRecognizerCommand"; +import { + DeleteResourcePolicyCommandInput, + DeleteResourcePolicyCommandOutput, +} from "./commands/DeleteResourcePolicyCommand"; import { DescribeDocumentClassificationJobCommandInput, DescribeDocumentClassificationJobCommandOutput, @@ -123,6 +127,10 @@ import { DescribePiiEntitiesDetectionJobCommandInput, DescribePiiEntitiesDetectionJobCommandOutput, } from "./commands/DescribePiiEntitiesDetectionJobCommand"; +import { + DescribeResourcePolicyCommandInput, + DescribeResourcePolicyCommandOutput, +} from "./commands/DescribeResourcePolicyCommand"; import { DescribeSentimentDetectionJobCommandInput, DescribeSentimentDetectionJobCommandOutput, @@ -140,6 +148,7 @@ import { DetectKeyPhrasesCommandInput, DetectKeyPhrasesCommandOutput } from "./c import { DetectPiiEntitiesCommandInput, DetectPiiEntitiesCommandOutput } from "./commands/DetectPiiEntitiesCommand"; import { DetectSentimentCommandInput, DetectSentimentCommandOutput } from "./commands/DetectSentimentCommand"; import { DetectSyntaxCommandInput, DetectSyntaxCommandOutput } from "./commands/DetectSyntaxCommand"; +import { ImportModelCommandInput, ImportModelCommandOutput } from "./commands/ImportModelCommand"; import { ListDocumentClassificationJobsCommandInput, ListDocumentClassificationJobsCommandOutput, @@ -193,6 +202,7 @@ import { ListTopicsDetectionJobsCommandInput, ListTopicsDetectionJobsCommandOutput, } from "./commands/ListTopicsDetectionJobsCommand"; +import { PutResourcePolicyCommandInput, PutResourcePolicyCommandOutput } from "./commands/PutResourcePolicyCommand"; import { 
StartDocumentClassificationJobCommandInput, StartDocumentClassificationJobCommandOutput, @@ -276,6 +286,7 @@ export type ServiceInputTypes = | DeleteDocumentClassifierCommandInput | DeleteEndpointCommandInput | DeleteEntityRecognizerCommandInput + | DeleteResourcePolicyCommandInput | DescribeDocumentClassificationJobCommandInput | DescribeDocumentClassifierCommandInput | DescribeDominantLanguageDetectionJobCommandInput @@ -285,6 +296,7 @@ export type ServiceInputTypes = | DescribeEventsDetectionJobCommandInput | DescribeKeyPhrasesDetectionJobCommandInput | DescribePiiEntitiesDetectionJobCommandInput + | DescribeResourcePolicyCommandInput | DescribeSentimentDetectionJobCommandInput | DescribeTopicsDetectionJobCommandInput | DetectDominantLanguageCommandInput @@ -293,6 +305,7 @@ export type ServiceInputTypes = | DetectPiiEntitiesCommandInput | DetectSentimentCommandInput | DetectSyntaxCommandInput + | ImportModelCommandInput | ListDocumentClassificationJobsCommandInput | ListDocumentClassifierSummariesCommandInput | ListDocumentClassifiersCommandInput @@ -307,6 +320,7 @@ export type ServiceInputTypes = | ListSentimentDetectionJobsCommandInput | ListTagsForResourceCommandInput | ListTopicsDetectionJobsCommandInput + | PutResourcePolicyCommandInput | StartDocumentClassificationJobCommandInput | StartDominantLanguageDetectionJobCommandInput | StartEntitiesDetectionJobCommandInput @@ -341,6 +355,7 @@ export type ServiceOutputTypes = | DeleteDocumentClassifierCommandOutput | DeleteEndpointCommandOutput | DeleteEntityRecognizerCommandOutput + | DeleteResourcePolicyCommandOutput | DescribeDocumentClassificationJobCommandOutput | DescribeDocumentClassifierCommandOutput | DescribeDominantLanguageDetectionJobCommandOutput @@ -350,6 +365,7 @@ export type ServiceOutputTypes = | DescribeEventsDetectionJobCommandOutput | DescribeKeyPhrasesDetectionJobCommandOutput | DescribePiiEntitiesDetectionJobCommandOutput + | DescribeResourcePolicyCommandOutput | 
DescribeSentimentDetectionJobCommandOutput | DescribeTopicsDetectionJobCommandOutput | DetectDominantLanguageCommandOutput @@ -358,6 +374,7 @@ export type ServiceOutputTypes = | DetectPiiEntitiesCommandOutput | DetectSentimentCommandOutput | DetectSyntaxCommandOutput + | ImportModelCommandOutput | ListDocumentClassificationJobsCommandOutput | ListDocumentClassifierSummariesCommandOutput | ListDocumentClassifiersCommandOutput @@ -372,6 +389,7 @@ export type ServiceOutputTypes = | ListSentimentDetectionJobsCommandOutput | ListTagsForResourceCommandOutput | ListTopicsDetectionJobsCommandOutput + | PutResourcePolicyCommandOutput | StartDocumentClassificationJobCommandOutput | StartDominantLanguageDetectionJobCommandOutput | StartEntitiesDetectionJobCommandOutput diff --git a/clients/client-comprehend/src/commands/CreateEndpointCommand.ts b/clients/client-comprehend/src/commands/CreateEndpointCommand.ts index e1626cd5b1f9b..794d06edeb294 100644 --- a/clients/client-comprehend/src/commands/CreateEndpointCommand.ts +++ b/clients/client-comprehend/src/commands/CreateEndpointCommand.ts @@ -23,8 +23,7 @@ export interface CreateEndpointCommandOutput extends CreateEndpointResponse, __M /** *Creates a model-specific endpoint for synchronous inference for a previously trained - * custom model - *
+ * custom model * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-comprehend/src/commands/DeleteResourcePolicyCommand.ts b/clients/client-comprehend/src/commands/DeleteResourcePolicyCommand.ts new file mode 100644 index 0000000000000..4686f40d9d760 --- /dev/null +++ b/clients/client-comprehend/src/commands/DeleteResourcePolicyCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { ComprehendClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../ComprehendClient"; +import { DeleteResourcePolicyRequest, DeleteResourcePolicyResponse } from "../models/models_0"; +import { + deserializeAws_json1_1DeleteResourcePolicyCommand, + serializeAws_json1_1DeleteResourcePolicyCommand, +} from "../protocols/Aws_json1_1"; + +export interface DeleteResourcePolicyCommandInput extends DeleteResourcePolicyRequest {} +export interface DeleteResourcePolicyCommandOutput extends DeleteResourcePolicyResponse, __MetadataBearer {} + +/** + *Deletes a resource-based policy that is attached to a custom model.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { ComprehendClient, DeleteResourcePolicyCommand } from "@aws-sdk/client-comprehend"; // ES Modules import + * // const { ComprehendClient, DeleteResourcePolicyCommand } = require("@aws-sdk/client-comprehend"); // CommonJS import + * const client = new ComprehendClient(config); + * const command = new DeleteResourcePolicyCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DeleteResourcePolicyCommandInput} for command's `input` shape. + * @see {@link DeleteResourcePolicyCommandOutput} for command's `response` shape. + * @see {@link ComprehendClientResolvedConfig | config} for ComprehendClient's `config` shape. + * + */ +export class DeleteResourcePolicyCommand extends $Command< + DeleteResourcePolicyCommandInput, + DeleteResourcePolicyCommandOutput, + ComprehendClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DeleteResourcePolicyCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackGets the details of a resource-based policy that is attached to a custom model, including + * the JSON body of the policy.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { ComprehendClient, DescribeResourcePolicyCommand } from "@aws-sdk/client-comprehend"; // ES Modules import + * // const { ComprehendClient, DescribeResourcePolicyCommand } = require("@aws-sdk/client-comprehend"); // CommonJS import + * const client = new ComprehendClient(config); + * const command = new DescribeResourcePolicyCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DescribeResourcePolicyCommandInput} for command's `input` shape. + * @see {@link DescribeResourcePolicyCommandOutput} for command's `response` shape. + * @see {@link ComprehendClientResolvedConfig | config} for ComprehendClient's `config` shape. + * + */ +export class DescribeResourcePolicyCommand extends $Command< + DescribeResourcePolicyCommandInput, + DescribeResourcePolicyCommandOutput, + ComprehendClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DescribeResourcePolicyCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackCreates a new custom model that replicates a source custom model that you import. The + * source model can be in your AWS account or another one.
+ *If the source model is in another AWS account, then it must have a resource-based policy + * that authorizes you to import it.
+ *The source model must be in the same AWS region that you're using when you import. You + * can't import a model that's in a different region.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { ComprehendClient, ImportModelCommand } from "@aws-sdk/client-comprehend"; // ES Modules import + * // const { ComprehendClient, ImportModelCommand } = require("@aws-sdk/client-comprehend"); // CommonJS import + * const client = new ComprehendClient(config); + * const command = new ImportModelCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ImportModelCommandInput} for command's `input` shape. + * @see {@link ImportModelCommandOutput} for command's `response` shape. + * @see {@link ComprehendClientResolvedConfig | config} for ComprehendClient's `config` shape. + * + */ +export class ImportModelCommand extends $Command< + ImportModelCommandInput, + ImportModelCommandOutput, + ComprehendClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ImportModelCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackAttaches a resource-based policy to a custom model. You can use this policy to authorize + * an entity in another AWS account to import the custom model, which replicates it in Amazon + * Comprehend in their account.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { ComprehendClient, PutResourcePolicyCommand } from "@aws-sdk/client-comprehend"; // ES Modules import + * // const { ComprehendClient, PutResourcePolicyCommand } = require("@aws-sdk/client-comprehend"); // CommonJS import + * const client = new ComprehendClient(config); + * const command = new PutResourcePolicyCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link PutResourcePolicyCommandInput} for command's `input` shape. + * @see {@link PutResourcePolicyCommandOutput} for command's `response` shape. + * @see {@link ComprehendClientResolvedConfig | config} for ComprehendClient's `config` shape. + * + */ +export class PutResourcePolicyCommand extends $Command< + PutResourcePolicyCommandInput, + PutResourcePolicyCommandOutput, + ComprehendClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: PutResourcePolicyCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackThe purpose of the data you've provided in the augmented manifest. You can either train or test this data. If you don't specify, the default is train.
- *TRAIN - all of the documents in the manifest will be used for training. If no test documents are provided, Amazon Comprehend will automatically reserve a portion of the training documents for testing.
+ *The purpose of the data you've provided in the augmented manifest. You can either train or + * test this data. If you don't specify, the default is train.
+ *TRAIN - all of the documents in the manifest will be used for training. If no test + * documents are provided, Amazon Comprehend will automatically reserve a portion of the training + * documents for testing.
*TEST - all of the documents in the manifest will be used for testing.
*/ Split?: Split | string; @@ -41,26 +44,31 @@ export interface AugmentedManifestsListItem { AttributeNames: string[] | undefined; /** - *The S3 prefix to the annotation files that are referred in the augmented manifest file.
+ *The S3 prefix to the annotation files that are referred in the augmented manifest + * file.
*/ AnnotationDataS3Uri?: string; /** - *The S3 prefix to the source files (PDFs) that are referred to in the augmented manifest file.
+ *The S3 prefix to the source files (PDFs) that are referred to in the augmented manifest + * file.
*/ SourceDocumentsS3Uri?: string; /** - *The type of augmented manifest. PlainTextDocument or SemiStructuredDocument. If you don't specify, the default is PlainTextDocument.
+ *The type of augmented manifest. PlainTextDocument or SemiStructuredDocument. If you don't + * specify, the default is PlainTextDocument.
*
- * PLAIN_TEXT_DOCUMENT
A document type that represents any unicode text that is encoded in UTF-8.
PLAIN_TEXT_DOCUMENT
A document type that represents any unicode text that
+ * is encoded in UTF-8.
*
- * SEMI_STRUCTURED_DOCUMENT
A document type with positional and structural context, like a PDF. For training with Amazon Comprehend, only PDFs are supported.
- * For inference, Amazon Comprehend support PDFs, DOCX and TXT.
SEMI_STRUCTURED_DOCUMENT
A document type with positional and structural
+ * context, like a PDF. For training with Amazon Comprehend, only PDFs are supported. For
+ * inference, Amazon Comprehend support PDFs, DOCX and TXT.
* The result of calling the operation. The operation - * returns one object that is successfully processed by the operation.
+ *The result of calling the operation. The + * operation returns one object that is successfully processed by the operation.
*/ export interface BatchDetectSyntaxItemResult { /** @@ -814,10 +822,10 @@ export namespace BatchDetectSyntaxItemResult { export interface BatchDetectSyntaxResponse { /** - *A list of objects containing the results
- * of the operation. The results are sorted in ascending order by the Index
field
- * and match the order of the documents in the input list. If all of the documents contain an
- * error, the ResultList
is empty.
A list of objects containing the
+ * results of the operation. The results are sorted in ascending order by the Index
+ * field and match the order of the documents in the input list. If all of the documents contain
+ * an error, the ResultList
is empty.
The Amazon S3 URI for the input data. - * The Amazon S3 bucket must be in the same AWS Region as the API endpoint that you are calling. - * The URI can point to a single input file or it can provide the prefix for a collection of input files.
+ *The Amazon S3 URI for the input data. The Amazon S3 bucket must be in the same AWS Region + * as the API endpoint that you are calling. The URI can point to a single input file or it can + * provide the prefix for a collection of input files.
*/ TestS3Uri?: string; @@ -1360,9 +1368,10 @@ export interface CreateDocumentClassifierRequest { DocumentClassifierName: string | undefined; /** - *The version name given to the newly created classifier. - * Version names can have a maximum of 256 characters. Alphanumeric characters, hyphens (-) and underscores (_) are allowed. - * The version name must be unique among all models with the same classifier name in the account/AWS Region.
+ *The version name given to the newly created classifier. Version names can have a maximum + * of 256 characters. Alphanumeric characters, hyphens (-) and underscores (_) are allowed. The + * version name must be unique among all models with the same classifier name in the account/AWS + * Region.
*/ VersionName?: string; @@ -1454,6 +1463,24 @@ export interface CreateDocumentClassifierRequest { * */ ModelKmsKeyId?: string; + + /** + *The resource-based policy to attach to your custom document classifier model. You can use + * this policy to allow another AWS account to import your custom model.
+ *Provide your policy as a JSON body that you enter as a UTF-8 encoded string without line + * breaks. To provide valid JSON, enclose the attribute names and values in double quotes. If the + * JSON body is also enclosed in double quotes, then you must escape the double quotes that are + * inside the policy:
+ *
+ * "{\"attribute\": \"value\", \"attribute\": [\"value\"]}"
+ *
To avoid escaping quotes, you can use single quotes to enclose the policy and double + * quotes to enclose the JSON names and values:
+ *
+ * '{"attribute": "value", "attribute": ["value"]}'
+ *
This specifies the Amazon S3 location where the test annotations for an entity recognizer are located. - * The URI must be in the same AWS Region as the API endpoint that you are calling.
+ *This specifies the Amazon S3 location where the test annotations for an entity recognizer + * are located. The URI must be in the same AWS Region as the API endpoint that you are + * calling.
*/ TestS3Uri?: string; } @@ -1655,17 +1683,18 @@ export interface EntityRecognizerDocuments { S3Uri: string | undefined; /** - *Specifies the Amazon S3 location where the test documents for an entity recognizer are located. - * The URI must be in the same AWS Region as the API endpoint that you are calling.
+ *Specifies the Amazon S3 location where the test documents for an entity recognizer are + * located. The URI must be in the same AWS Region as the API endpoint that you are + * calling.
*/ TestS3Uri?: string; /** - *Specifies how the text in an input file should be processed. This is optional, and the default is ONE_DOC_PER_LINE. - * - * ONE_DOC_PER_FILE - Each file is considered a separate document. Use this option when you are processing large documents, such as newspaper articles or scientific papers. - * - * ONE_DOC_PER_LINE - Each line in a file is considered a separate document. Use this option when you are processing many short documents, such as text messages.
+ *Specifies how the text in an input file should be processed. This is optional, and the + * default is ONE_DOC_PER_LINE. ONE_DOC_PER_FILE - Each file is considered a separate document. + * Use this option when you are processing large documents, such as newspaper articles or + * scientific papers. ONE_DOC_PER_LINE - Each line in a file is considered a separate document. + * Use this option when you are processing many short documents, such as text messages.
*/ InputFormat?: InputFormat | string; } @@ -1812,9 +1841,10 @@ export interface CreateEntityRecognizerRequest { RecognizerName: string | undefined; /** - *The version name given to the newly created recognizer. - * Version names can be a maximum of 256 characters. Alphanumeric characters, hyphens (-) and underscores (_) are allowed. - * The version name must be unique among all models with the same recognizer name in the account/ AWS Region.
+ *The version name given to the newly created recognizer. Version names can be a maximum of + * 256 characters. Alphanumeric characters, hyphens (-) and underscores (_) are allowed. The + * version name must be unique among all models with the same recognizer name in the account/ AWS + * Region.
*/ VersionName?: string; @@ -1893,6 +1923,24 @@ export interface CreateEntityRecognizerRequest { * */ ModelKmsKeyId?: string; + + /** + *The JSON resource-based policy to attach to your custom entity recognizer model. You can + * use this policy to allow another AWS account to import your custom model.
+ *Provide your JSON as a UTF-8 encoded string without line breaks. To provide valid JSON for + * your policy, enclose the attribute names and values in double quotes. If the JSON body is also + * enclosed in double quotes, then you must escape the double quotes that are inside the + * policy:
+ *
+ * "{\"attribute\": \"value\", \"attribute\": [\"value\"]}"
+ *
To avoid escaping quotes, you can use single quotes to enclose the policy and double + * quotes to enclose the JSON names and values:
+ *
+ * '{"attribute": "value", "attribute": ["value"]}'
+ *
The Amazon Resource Name (ARN) of the custom model version that has the policy to delete.
+ */ + ResourceArn: string | undefined; + + /** + *The revision ID of the policy to delete.
+ */ + PolicyRevisionId?: string; +} + +export namespace DeleteResourcePolicyRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteResourcePolicyRequest): any => ({ + ...obj, + }); +} + +export interface DeleteResourcePolicyResponse {} + +export namespace DeleteResourcePolicyResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteResourcePolicyResponse): any => ({ + ...obj, + }); +} + export interface DescribeDocumentClassificationJobRequest { /** *The identifier that Amazon Comprehend generated for the job. The operation returns this identifier in its @@ -2042,11 +2122,13 @@ export interface DocumentReaderConfig { *
- * TEXTRACT_DETECT_DOCUMENT_TEXT
- The service calls DetectDocumentText for PDF documents per page.
TEXTRACT_DETECT_DOCUMENT_TEXT
- The service calls DetectDocumentText
+ * for PDF documents per page.
*
- * TEXTRACT_ANALYZE_DOCUMENT
- The service calls AnalyzeDocument for PDF documents per page.
TEXTRACT_ANALYZE_DOCUMENT
- The service calls AnalyzeDocument for PDF
+ * documents per page.
*
- * SERVICE_DEFAULT
- use service defaults for Document reading. For Digital PDF it would mean using an internal parser instead of Textract APIs
SERVICE_DEFAULT
- use service defaults for Document reading. For
+ * Digital PDF it would mean using an internal parser instead of Textract APIs
*
- * FORCE_DOCUMENT_READ_ACTION
- Always use specified action for DocumentReadAction, including Digital PDF.
- *
FORCE_DOCUMENT_READ_ACTION
- Always use specified action for
+ * DocumentReadAction, including Digital PDF.
* The document reader config field applies only for InputDataConfig of StartEntitiesDetectionJob.
- *Use DocumentReaderConfig to provide specifications about how you want your inference documents read. - * Currently it applies for PDF documents in StartEntitiesDetectionJob custom inference.
+ *The document reader config field applies only for InputDataConfig of + * StartEntitiesDetectionJob.
+ *Use DocumentReaderConfig to provide specifications about how you want your inference + * documents read. Currently it applies for PDF documents in StartEntitiesDetectionJob custom + * inference.
*/ DocumentReaderConfig?: DocumentReaderConfig; } @@ -2482,6 +2567,12 @@ export interface DocumentClassifierProperties { *The version name that you assigned to the document classifier.
*/ VersionName?: string; + + /** + *The Amazon Resource Name (ARN) of the source model. This model was imported from a + * different AWS account to create the document classifier model in your AWS account.
+ */ + SourceModelArn?: string; } export namespace DocumentClassifierProperties { @@ -2701,7 +2792,8 @@ export interface EndpointProperties { ModelArn?: string; /** - *ARN of the new model to use for updating an existing endpoint. This ARN is going to be different from the model ARN when the update is in progress
+ *ARN of the new model to use for updating an existing endpoint. This ARN is going to be + * different from the model ARN when the update is in progress
*/ DesiredModelArn?: string; @@ -2735,7 +2827,8 @@ export interface EndpointProperties { DataAccessRoleArn?: string; /** - *Data access role ARN to use in case the new model is encrypted with a customer KMS key.
+ *Data access role ARN to use in case the new model is encrypted with a customer KMS + * key.
*/ DesiredDataAccessRoleArn?: string; } @@ -3171,6 +3264,12 @@ export interface EntityRecognizerProperties { *The version name you assigned to the entity recognizer.
*/ VersionName?: string; + + /** + *The Amazon Resource Name (ARN) of the source model. This model was imported from a + * different AWS account to create the entity recognizer model in your AWS account.
+ */ + SourceModelArn?: string; } export namespace EntityRecognizerProperties { @@ -3656,6 +3755,54 @@ export namespace DescribePiiEntitiesDetectionJobResponse { }); } +export interface DescribeResourcePolicyRequest { + /** + *The Amazon Resource Name (ARN) of the policy to describe.
+ */ + ResourceArn: string | undefined; +} + +export namespace DescribeResourcePolicyRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DescribeResourcePolicyRequest): any => ({ + ...obj, + }); +} + +export interface DescribeResourcePolicyResponse { + /** + *The JSON body of the resource-based policy.
+ */ + ResourcePolicy?: string; + + /** + *The time at which the policy was created.
+ */ + CreationTime?: Date; + + /** + *The time at which the policy was last modified.
+ */ + LastModifiedTime?: Date; + + /** + *The revision ID of the policy. Each time you modify a policy, Amazon Comprehend assigns a + * new revision ID, and it deletes the prior version of the policy.
+ */ + PolicyRevisionId?: string; +} + +export namespace DescribeResourcePolicyResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DescribeResourcePolicyResponse): any => ({ + ...obj, + }); +} + export interface DescribeSentimentDetectionJobRequest { /** *The identifier that Amazon Comprehend generated for the job. The operation returns this identifier in its @@ -4245,6 +4392,84 @@ export namespace DetectSyntaxResponse { }); } +export interface ImportModelRequest { + /** + *
The Amazon Resource Name (ARN) of the custom model to import.
+ */ + SourceModelArn: string | undefined; + + /** + *The name to assign to the custom model that is created in Amazon Comprehend by this + * import.
+ */ + ModelName?: string; + + /** + *The version name given to the custom model that is created by this import. Version names + * can have a maximum of 256 characters. Alphanumeric characters, hyphens (-) and underscores (_) + * are allowed. The version name must be unique among all models with the same classifier name in + * the account/AWS Region.
+ */ + VersionName?: string; + + /** + *ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt + * trained custom models. The ModelKmsKeyId can be either of the following formats:
+ *KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab"
+ *
Amazon Resource Name (ARN) of a KMS Key:
+ * "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"
+ *
The Amazon Resource Name (ARN) of the AWS Identity and Management (IAM) role that allows + * Amazon Comprehend to use Amazon Key Management Service (KMS) to encrypt or decrypt the custom + * model.
+ */ + DataAccessRoleArn?: string; + + /** + *Tags to be associated with the custom model that is created by this import. A tag is a + * key-value pair that adds as a metadata to a resource used by Amazon Comprehend. For example, a + * tag with "Sales" as the key might be added to a resource to indicate its use by the sales + * department.
+ */ + Tags?: Tag[]; +} + +export namespace ImportModelRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ImportModelRequest): any => ({ + ...obj, + }); +} + +export interface ImportModelResponse { + /** + *The Amazon Resource Name (ARN) of the custom model being imported.
+ */ + ModelArn?: string; +} + +export namespace ImportModelResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ImportModelResponse): any => ({ + ...obj, + }); +} + /** *The filter specified for the operation is invalid. Specify a different * filter.
@@ -4518,7 +4743,8 @@ export namespace ListDocumentClassifierSummariesResponse { /** *Provides information for filtering a list of dominant language detection jobs. For more - * information, see the operation.
+ * information, see the + * operation. */ export interface DominantLanguageDetectionJobFilter { /** @@ -5433,6 +5659,62 @@ export namespace ListTopicsDetectionJobsResponse { }); } +export interface PutResourcePolicyRequest { + /** + *The Amazon Resource Name (ARN) of the custom model to attach the policy to.
+ */ + ResourceArn: string | undefined; + + /** + *The JSON resource-based policy to attach to your custom model. Provide your JSON as a + * UTF-8 encoded string without line breaks. To provide valid JSON for your policy, enclose the + * attribute names and values in double quotes. If the JSON body is also enclosed in double + * quotes, then you must escape the double quotes that are inside the policy:
+ *
+ * "{\"attribute\": \"value\", \"attribute\": [\"value\"]}"
+ *
To avoid escaping quotes, you can use single quotes to enclose the policy and double + * quotes to enclose the JSON names and values:
+ *
+ * '{"attribute": "value", "attribute": ["value"]}'
+ *
The revision ID that Amazon Comprehend assigned to the policy that you are updating. If + * you are creating a new policy that has no prior version, don't use this parameter. Amazon + * Comprehend creates the revision ID for you.
+ */ + PolicyRevisionId?: string; +} + +export namespace PutResourcePolicyRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PutResourcePolicyRequest): any => ({ + ...obj, + }); +} + +export interface PutResourcePolicyResponse { + /** + *The revision ID of the policy. Each time you modify a policy, Amazon Comprehend assigns a + * new revision ID, and it deletes the prior version of the policy.
+ */ + PolicyRevisionId?: string; +} + +export namespace PutResourcePolicyResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PutResourcePolicyResponse): any => ({ + ...obj, + }); +} + export interface StartDocumentClassificationJobRequest { /** *The identifier of the job.
diff --git a/clients/client-comprehend/src/protocols/Aws_json1_1.ts b/clients/client-comprehend/src/protocols/Aws_json1_1.ts index f1a804ddfb32c..ce980382c2175 100644 --- a/clients/client-comprehend/src/protocols/Aws_json1_1.ts +++ b/clients/client-comprehend/src/protocols/Aws_json1_1.ts @@ -58,6 +58,10 @@ import { DeleteEntityRecognizerCommandInput, DeleteEntityRecognizerCommandOutput, } from "../commands/DeleteEntityRecognizerCommand"; +import { + DeleteResourcePolicyCommandInput, + DeleteResourcePolicyCommandOutput, +} from "../commands/DeleteResourcePolicyCommand"; import { DescribeDocumentClassificationJobCommandInput, DescribeDocumentClassificationJobCommandOutput, @@ -91,6 +95,10 @@ import { DescribePiiEntitiesDetectionJobCommandInput, DescribePiiEntitiesDetectionJobCommandOutput, } from "../commands/DescribePiiEntitiesDetectionJobCommand"; +import { + DescribeResourcePolicyCommandInput, + DescribeResourcePolicyCommandOutput, +} from "../commands/DescribeResourcePolicyCommand"; import { DescribeSentimentDetectionJobCommandInput, DescribeSentimentDetectionJobCommandOutput, @@ -108,6 +116,7 @@ import { DetectKeyPhrasesCommandInput, DetectKeyPhrasesCommandOutput } from "../ import { DetectPiiEntitiesCommandInput, DetectPiiEntitiesCommandOutput } from "../commands/DetectPiiEntitiesCommand"; import { DetectSentimentCommandInput, DetectSentimentCommandOutput } from "../commands/DetectSentimentCommand"; import { DetectSyntaxCommandInput, DetectSyntaxCommandOutput } from "../commands/DetectSyntaxCommand"; +import { ImportModelCommandInput, ImportModelCommandOutput } from "../commands/ImportModelCommand"; import { ListDocumentClassificationJobsCommandInput, ListDocumentClassificationJobsCommandOutput, @@ -161,6 +170,7 @@ import { ListTopicsDetectionJobsCommandInput, ListTopicsDetectionJobsCommandOutput, } from "../commands/ListTopicsDetectionJobsCommand"; +import { PutResourcePolicyCommandInput, PutResourcePolicyCommandOutput } from 
"../commands/PutResourcePolicyCommand"; import { StartDocumentClassificationJobCommandInput, StartDocumentClassificationJobCommandOutput, @@ -266,6 +276,8 @@ import { DeleteEndpointResponse, DeleteEntityRecognizerRequest, DeleteEntityRecognizerResponse, + DeleteResourcePolicyRequest, + DeleteResourcePolicyResponse, DescribeDocumentClassificationJobRequest, DescribeDocumentClassificationJobResponse, DescribeDocumentClassifierRequest, @@ -284,6 +296,8 @@ import { DescribeKeyPhrasesDetectionJobResponse, DescribePiiEntitiesDetectionJobRequest, DescribePiiEntitiesDetectionJobResponse, + DescribeResourcePolicyRequest, + DescribeResourcePolicyResponse, DescribeSentimentDetectionJobRequest, DescribeSentimentDetectionJobResponse, DescribeTopicsDetectionJobRequest, @@ -334,6 +348,8 @@ import { EntityTypesListItem, EventsDetectionJobFilter, EventsDetectionJobProperties, + ImportModelRequest, + ImportModelResponse, InputDataConfig, InternalServerException, InvalidFilterException, @@ -378,6 +394,8 @@ import { PiiEntity, PiiEntityType, PiiOutputDataConfig, + PutResourcePolicyRequest, + PutResourcePolicyResponse, RedactionConfig, ResourceInUseException, ResourceLimitExceededException, @@ -605,6 +623,19 @@ export const serializeAws_json1_1DeleteEntityRecognizerCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1DeleteResourcePolicyCommand = async ( + input: DeleteResourcePolicyCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "Comprehend_20171127.DeleteResourcePolicy", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1DeleteResourcePolicyRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1DescribeDocumentClassificationJobCommand = async ( input: DescribeDocumentClassificationJobCommandInput, 
context: __SerdeContext @@ -722,6 +753,19 @@ export const serializeAws_json1_1DescribePiiEntitiesDetectionJobCommand = async return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1DescribeResourcePolicyCommand = async ( + input: DescribeResourcePolicyCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "Comprehend_20171127.DescribeResourcePolicy", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1DescribeResourcePolicyRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1DescribeSentimentDetectionJobCommand = async ( input: DescribeSentimentDetectionJobCommandInput, context: __SerdeContext @@ -826,6 +870,19 @@ export const serializeAws_json1_1DetectSyntaxCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1ImportModelCommand = async ( + input: ImportModelCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "Comprehend_20171127.ImportModel", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1ImportModelRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1ListDocumentClassificationJobsCommand = async ( input: ListDocumentClassificationJobsCommandInput, context: __SerdeContext @@ -1008,6 +1065,19 @@ export const serializeAws_json1_1ListTopicsDetectionJobsCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1PutResourcePolicyCommand = async ( + input: PutResourcePolicyCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: 
__HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "Comprehend_20171127.PutResourcePolicy", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1PutResourcePolicyRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1StartDocumentClassificationJobCommand = async ( input: StartDocumentClassificationJobCommandInput, context: __SerdeContext @@ -2437,6 +2507,76 @@ const deserializeAws_json1_1DeleteEntityRecognizerCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; +export const deserializeAws_json1_1DeleteResourcePolicyCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise
- * GetSavingsPlansCoverage
doesn't support filtering by tags. GetSavingsPlansCoverage
also doesn't support the OR
operator between filter dimensions. For the full request syntax with supported parameters, see Examples.
To determine valid values for a dimension, use the GetDimensionValues
operation.
Retrieves the Savings Plans utilization for your account across date ranges with daily or monthly granularity. Management account in an organization have access to member accounts. You can use GetDimensionValues
in SAVINGS_PLANS
to determine the possible dimension values.
You cannot group by any dimension values for GetSavingsPlansUtilization
.
- * GetSavingsPlansUtilization
doesn't support filtering by tags. GetSavingsPlansUtilization
also doesn't support the OR
operator between filter dimensions. For the full request syntax with supported parameters, see Examples.
* GetSavingsPlanUtilizationDetails
internally groups data by SavingsPlansArn
.
- * GetSavingsPlansUtilizationDetails
doesn't support filtering by tags. GetSavingsPlansUtilizationDetails
also doesn't support the OR
operator between filter dimensions. For the full request syntax with supported parameters, see Examples.
- * GetSavingsPlansCoverage
doesn't support filtering by tags. GetSavingsPlansCoverage
also doesn't support the OR
operator between filter dimensions. For the full request syntax with supported parameters, see Examples.
To determine valid values for a dimension, use the GetDimensionValues
operation.
Retrieves the Savings Plans utilization for your account across date ranges with daily or monthly granularity. Management account in an organization have access to member accounts. You can use GetDimensionValues
in SAVINGS_PLANS
to determine the possible dimension values.
You cannot group by any dimension values for GetSavingsPlansUtilization
.
- * GetSavingsPlansUtilization
doesn't support filtering by tags. GetSavingsPlansUtilization
also doesn't support the OR
operator between filter dimensions. For the full request syntax with supported parameters, see Examples.
* GetSavingsPlanUtilizationDetails
internally groups data by SavingsPlansArn
.
- * GetSavingsPlansUtilizationDetails
doesn't support filtering by tags. GetSavingsPlansUtilizationDetails
also doesn't support the OR
operator between filter dimensions. For the full request syntax with supported parameters, see Examples.
Expression
objects
* to define any combination of dimension filters. For more information, see
* Expression.
- * The default values are EQUALS
and CASE_SENSITIVE
. Valid values for MatchOptions
for Dimensions
are EQUALS
and CASE_SENSITIVE
.
Valid values for MatchOptions
for CostCategories
and Tags
are EQUALS
, ABSENT
, and CASE_SENSITIVE
.
The default values are EQUALS
and CASE_SENSITIVE
. Valid values for MatchOptions
for Dimensions
are EQUALS
and CASE_SENSITIVE
.
You can group Amazon Web Services costs using up to two different groups, either dimensions, tag keys, * cost categories, or any two group by types.
- *Valid values for the DIMENSION
type are AZ
, INSTANCE_TYPE
, LEGAL_ENTITY_NAME
, LINKED_ACCOUNT
,
+ *
Valid values for the DIMENSION
type are AZ
, INSTANCE_TYPE
, LEGAL_ENTITY_NAME
, INVOICING_ENTITY
, LINKED_ACCOUNT
,
* OPERATION
, PLATFORM
, PURCHASE_TYPE
, SERVICE
,
* TENANCY
, RECORD_TYPE
, and USAGE_TYPE
.
When you group by the TAG
type and include a valid tag key, you get all tag values, including empty strings.
The GetCostAndUsageWithResources
operation requires that you either group by or filter by a
* ResourceId
. It requires the Expression
* "SERVICE = Amazon Elastic Compute Cloud - Compute"
in the filter.
The default values are EQUALS
and CASE_SENSITIVE
. Valid values for MatchOptions
for Dimensions
are EQUALS
and CASE_SENSITIVE
.
Valid values for MatchOptions
for CostCategories
and Tags
are EQUALS
, ABSENT
, and CASE_SENSITIVE
.
The default values are EQUALS
and CASE_SENSITIVE
. Valid values for MatchOptions
for Dimensions
are EQUALS
and CASE_SENSITIVE
.
The name of the dimension. Each Dimension
is available for a different Context
.
- * For more information, see Context.
+ * For more information, see Context
.
*
*
AZ - The Availability Zone. An example is us-east-1a
.
BILLING_ENTITY - The Amazon Web Services seller that your account is with. Possible values are the following:
+ *- Amazon Web Services(Amazon Web Services): The entity that sells Amazon Web Services services.
+ *- AISPL (Amazon Internet Services Pvt. Ltd.): The local Indian entity that is an acting reseller for Amazon Web Services services in India.
+ *- Amazon Web Services Marketplace: The entity that supports the sale of solutions built on Amazon Web Services by third-party software providers.
+ *CACHE_ENGINE - The Amazon ElastiCache operating system. Examples are Windows or Linux.
+ *DEPLOYMENT_OPTION - The scope of Amazon Relational Database Service deployments. Valid values are SingleAZ
and MultiAZ
.
DATABASE_ENGINE - The Amazon Relational Database Service database. Examples are Aurora or MySQL.
*INSTANCE_TYPE - The type of Amazon EC2 instance. An example is m4.xlarge
.
INSTANCE_TYPE_FAMILY - A family of instance types optimized to fit different use cases. Examples are Compute Optimized
(C4
, C5
, C6g
, C7g
etc.), Memory Optimization
(R4
, R5n
, R5b
, R6g
etc).
INVOICING_ENTITY - The name of the entity issuing the Amazon Web Services invoice.
+ *LEGAL_ENTITY_NAME - The name of the organization that sells you Amazon Web Services services, such as Amazon Web Services.
*RESERVATION_ID - The unique identifier for an Amazon Web Services Reservation Instance.
+ *SAVINGS_PLAN_ARN - The unique identifier for your Savings Plans.
+ *SAVINGS_PLANS_TYPE - Type of Savings Plans (EC2 Instance or Compute).
+ *SERVICE - The Amazon Web Services service such as Amazon DynamoDB.
*TENANCY - The tenancy of a resource. Examples are shared or dedicated.
+ *USAGE_TYPE - The type of usage. An example is DataTransfer-In-Bytes. The response for the GetDimensionValues
operation
* includes a unit attribute. Examples include GB and Hrs.
SAVINGS_PLAN_ARN - The unique identifier for your Savings Plan
+ *SAVINGS_PLAN_ARN - The unique identifier for your Savings Plans.
*INSTANCE_TYPE
* *INVOICING_ENTITY
+ *LINKED_ACCOUNT
*
* GetReservationCoverage
uses the same
* Expression object
- * as the other operations, but only AND
is supported among each dimension. You can nest only one level deep.
AND
is supported among each dimension. You can nest only one level deep.
+ * If there are multiple values for a dimension, they are OR'd together.
* If you don't provide a SERVICE
filter, Cost Explorer defaults to EC2.
Cost category is also supported.
*/ @@ -5638,7 +5673,7 @@ export interface GetReservationUtilizationRequest { *GetReservationUtilization
uses the same
* Expression object
* as the other operations, but only AND
is supported among each dimension, and nesting is supported up to
- * only one level deep.
+ * only one level deep. If there are multiple values for a dimension, they are OR'd together.
*/
Filter?: Expression;
@@ -5897,8 +5932,8 @@ export interface GetSavingsPlansCoverageRequest {
*
* GetSavingsPlansCoverage
uses the same
* Expression object
- * as the other operations, but only AND
is supported among each dimension.
Cost category is supported. Tags are not supported.
 + * as the other operations, but only <code>AND</code> is supported among each dimension. If there are multiple values for a dimension, they are OR'd together.
+ * Cost category is also supported.
*/ Filter?: Expression; @@ -6073,7 +6108,6 @@ export interface GetSavingsPlansUtilizationDetailsRequest { *GetSavingsPlansUtilizationDetails
uses the same
* Expression object
* as the other operations, but only AND
is supported among each dimension.
- * Filtering by tags isn't supported.
*/ Filter?: Expression; @@ -6198,7 +6232,6 @@ export interface GetSavingsPlansUtilizationRequest { *GetSavingsPlansUtilization
uses the same
* Expression object
* as the other operations, but only AND
is supported among each dimension.
- * Filtering by tags isn't supported.
*/ Filter?: Expression; diff --git a/clients/client-dynamodb/src/DynamoDB.ts b/clients/client-dynamodb/src/DynamoDB.ts index c87840e009ba7..6258151ea0b5b 100644 --- a/clients/client-dynamodb/src/DynamoDB.ts +++ b/clients/client-dynamodb/src/DynamoDB.ts @@ -337,9 +337,11 @@ export class DynamoDB extends DynamoDBClient { /** *The BatchWriteItem
operation puts or deletes multiple items in one or
- * more tables. A single call to BatchWriteItem
can write up to 16 MB of data,
- * which can comprise as many as 25 put or delete requests. Individual items to be written
- * can be as large as 400 KB.
BatchWriteItem
can transmit up to 16MB of
+ * data over the network, consisting of up to 25 item put or delete operations. While
+ * individual items can be up to 400 KB once stored, it's important to
+ * note that an item's representation might be greater than 400KB while being sent in
+ * DynamoDB's JSON format for the API call. For more details on this distinction, see Naming Rules and Data Types.
*
* BatchWriteItem
cannot update items. To update items, use the
@@ -2274,8 +2276,8 @@ export class DynamoDB extends DynamoDBClient {
*
Updates the status for contributor insights for a specific table or index. CloudWatch * Contributor Insights for DynamoDB graphs display the partition key and (if applicable) * sort key of frequently accessed items and frequently throttled items in plaintext. If - * you require the use of AWS Key Management Service (KMS) to encrypt this table’s - * partition key and sort key data with an AWS managed key or customer managed key, you + * you require the use of Amazon Web Services Key Management Service (KMS) to encrypt this table’s + * partition key and sort key data with an Amazon Web Services managed key or customer managed key, you * should not enable CloudWatch Contributor Insights for DynamoDB for this table.
*/ public updateContributorInsights( diff --git a/clients/client-dynamodb/src/commands/BatchWriteItemCommand.ts b/clients/client-dynamodb/src/commands/BatchWriteItemCommand.ts index 56c366dfc7be6..574ac3dcc9e2b 100644 --- a/clients/client-dynamodb/src/commands/BatchWriteItemCommand.ts +++ b/clients/client-dynamodb/src/commands/BatchWriteItemCommand.ts @@ -23,9 +23,11 @@ export interface BatchWriteItemCommandOutput extends BatchWriteItemOutput, __Met /** *The BatchWriteItem
operation puts or deletes multiple items in one or
- * more tables. A single call to BatchWriteItem
can write up to 16 MB of data,
- * which can comprise as many as 25 put or delete requests. Individual items to be written
- * can be as large as 400 KB.
BatchWriteItem
can transmit up to 16MB of
+ * data over the network, consisting of up to 25 item put or delete operations. While
+ * individual items can be up to 400 KB once stored, it's important to
+ * note that an item's representation might be greater than 400KB while being sent in
+ * DynamoDB's JSON format for the API call. For more details on this distinction, see Naming Rules and Data Types.
*
* BatchWriteItem
cannot update items. To update items, use the
diff --git a/clients/client-dynamodb/src/commands/UpdateContributorInsightsCommand.ts b/clients/client-dynamodb/src/commands/UpdateContributorInsightsCommand.ts
index edfcde0bde2c4..1ce6a9a2fbfc5 100644
--- a/clients/client-dynamodb/src/commands/UpdateContributorInsightsCommand.ts
+++ b/clients/client-dynamodb/src/commands/UpdateContributorInsightsCommand.ts
@@ -25,8 +25,8 @@ export interface UpdateContributorInsightsCommandOutput extends UpdateContributo
*
Updates the status for contributor insights for a specific table or index. CloudWatch * Contributor Insights for DynamoDB graphs display the partition key and (if applicable) * sort key of frequently accessed items and frequently throttled items in plaintext. If - * you require the use of AWS Key Management Service (KMS) to encrypt this table’s - * partition key and sort key data with an AWS managed key or customer managed key, you + * you require the use of Amazon Web Services Key Management Service (KMS) to encrypt this table’s + * partition key and sort key data with an Amazon Web Services managed key or customer managed key, you * should not enable CloudWatch Contributor Insights for DynamoDB for this table.
* @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-dynamodb/src/models/models_0.ts b/clients/client-dynamodb/src/models/models_0.ts index c75399a5bc467..5e7887f13d1b2 100644 --- a/clients/client-dynamodb/src/models/models_0.ts +++ b/clients/client-dynamodb/src/models/models_0.ts @@ -1321,10 +1321,6 @@ export interface PointInTimeRecoveryDescription { *
- * ENABLING
- Point in time recovery is being enabled.
* ENABLED
- Point in time recovery is enabled.
Creates an Amazon EBS-backed AMI from an Amazon EBS-backed instance * that is either running or stopped.
+ *By default, Amazon EC2 shuts down and reboots the instance before creating the AMI to ensure that everything on + * the instance is stopped and in a consistent state during the creation process. If you're confident that your + * instance is in a consistent state appropriate for AMI creation, use the NoReboot + * parameter to prevent Amazon EC2 from shutting down and rebooting the instance.
+ *Deregisters the specified AMI. After you deregister an AMI, it can't be used to launch - * new instances; however, it doesn't affect any instances that you've already launched - * from the AMI. You'll continue to incur usage costs for those instances until you - * terminate them.
+ *Deregisters the specified AMI. After you deregister an AMI, it can't be used to + * launch new instances.
+ * + * + *If you deregister an AMI that matches a Recycle Bin retention rule, the AMI is + * retained in the Recycle Bin for the specified retention period. For more information, + * see Recycle + * Bin in the Amazon Elastic Compute Cloud User Guide.
+ * + *When you deregister an AMI, it doesn't affect any instances that you've already + * launched from the AMI. You'll continue to incur usage costs for those instances until + * you terminate them.
*When you deregister an Amazon EBS-backed AMI, it doesn't affect the snapshot that was * created for the root volume of the instance during the AMI creation process. When you * deregister an instance store-backed AMI, it doesn't affect the files that you uploaded @@ -14333,6 +14357,9 @@ export class EC2 extends EC2Client { *
Discontinue faster launching for a Windows AMI, and clean up existing pre-provisioned snapshots. * When you disable faster launching, the AMI uses the standard launch process for each * instance. All pre-provisioned snapshots must be removed before you can enable faster launching again.
+ *To change these settings, you must own the AMI.
+ *To change these settings, you must own the AMI.
+ *Lists one or more AMIs that are currently in the Recycle Bin. For more information, + * see Recycle + * Bin in the Amazon Elastic Compute Cloud User Guide.
+ */ + public listImagesInRecycleBin( + args: ListImagesInRecycleBinCommandInput, + options?: __HttpHandlerOptions + ): PromiseLists one or more snapshots that are currently in the Recycle Bin.
*/ @@ -20512,6 +20576,38 @@ export class EC2 extends EC2Client { } } + /** + *Restores an AMI from the Recycle Bin. For more information, see Recycle Bin in the Amazon Elastic Compute Cloud User Guide.
+ */ + public restoreImageFromRecycleBin( + args: RestoreImageFromRecycleBinCommandInput, + options?: __HttpHandlerOptions + ): PromiseRestores the entries from a previous version of a managed prefix list to a new version of the prefix list.
*/ diff --git a/clients/client-ec2/src/EC2Client.ts b/clients/client-ec2/src/EC2Client.ts index 5f8f66df9891c..10b6cc22aa906 100644 --- a/clients/client-ec2/src/EC2Client.ts +++ b/clients/client-ec2/src/EC2Client.ts @@ -1377,6 +1377,10 @@ import { ImportInstanceCommandInput, ImportInstanceCommandOutput } from "./comma import { ImportKeyPairCommandInput, ImportKeyPairCommandOutput } from "./commands/ImportKeyPairCommand"; import { ImportSnapshotCommandInput, ImportSnapshotCommandOutput } from "./commands/ImportSnapshotCommand"; import { ImportVolumeCommandInput, ImportVolumeCommandOutput } from "./commands/ImportVolumeCommand"; +import { + ListImagesInRecycleBinCommandInput, + ListImagesInRecycleBinCommandOutput, +} from "./commands/ListImagesInRecycleBinCommand"; import { ListSnapshotsInRecycleBinCommandInput, ListSnapshotsInRecycleBinCommandOutput, @@ -1693,6 +1697,10 @@ import { RestoreAddressToClassicCommandInput, RestoreAddressToClassicCommandOutput, } from "./commands/RestoreAddressToClassicCommand"; +import { + RestoreImageFromRecycleBinCommandInput, + RestoreImageFromRecycleBinCommandOutput, +} from "./commands/RestoreImageFromRecycleBinCommand"; import { RestoreManagedPrefixListVersionCommandInput, RestoreManagedPrefixListVersionCommandOutput, @@ -2178,6 +2186,7 @@ export type ServiceInputTypes = | ImportKeyPairCommandInput | ImportSnapshotCommandInput | ImportVolumeCommandInput + | ListImagesInRecycleBinCommandInput | ListSnapshotsInRecycleBinCommandInput | ModifyAddressAttributeCommandInput | ModifyAvailabilityZoneGroupCommandInput @@ -2272,6 +2281,7 @@ export type ServiceInputTypes = | ResetNetworkInterfaceAttributeCommandInput | ResetSnapshotAttributeCommandInput | RestoreAddressToClassicCommandInput + | RestoreImageFromRecycleBinCommandInput | RestoreManagedPrefixListVersionCommandInput | RestoreSnapshotFromRecycleBinCommandInput | RestoreSnapshotTierCommandInput @@ -2699,6 +2709,7 @@ export type ServiceOutputTypes = | ImportKeyPairCommandOutput | 
ImportSnapshotCommandOutput | ImportVolumeCommandOutput + | ListImagesInRecycleBinCommandOutput | ListSnapshotsInRecycleBinCommandOutput | ModifyAddressAttributeCommandOutput | ModifyAvailabilityZoneGroupCommandOutput @@ -2793,6 +2804,7 @@ export type ServiceOutputTypes = | ResetNetworkInterfaceAttributeCommandOutput | ResetSnapshotAttributeCommandOutput | RestoreAddressToClassicCommandOutput + | RestoreImageFromRecycleBinCommandOutput | RestoreManagedPrefixListVersionCommandOutput | RestoreSnapshotFromRecycleBinCommandOutput | RestoreSnapshotTierCommandOutput diff --git a/clients/client-ec2/src/commands/CreateImageCommand.ts b/clients/client-ec2/src/commands/CreateImageCommand.ts index 6c6c8504f55e1..ec2b7bc9bb37c 100644 --- a/clients/client-ec2/src/commands/CreateImageCommand.ts +++ b/clients/client-ec2/src/commands/CreateImageCommand.ts @@ -21,6 +21,12 @@ export interface CreateImageCommandOutput extends CreateImageResult, __MetadataB /** *Creates an Amazon EBS-backed AMI from an Amazon EBS-backed instance * that is either running or stopped.
+ *By default, Amazon EC2 shuts down and reboots the instance before creating the AMI to ensure that everything on + * the instance is stopped and in a consistent state during the creation process. If you're confident that your + * instance is in a consistent state appropriate for AMI creation, use the NoReboot + * parameter to prevent Amazon EC2 from shutting down and rebooting the instance.
+ *Deregisters the specified AMI. After you deregister an AMI, it can't be used to launch - * new instances; however, it doesn't affect any instances that you've already launched - * from the AMI. You'll continue to incur usage costs for those instances until you - * terminate them.
+ *Deregisters the specified AMI. After you deregister an AMI, it can't be used to + * launch new instances.
+ * + * + *If you deregister an AMI that matches a Recycle Bin retention rule, the AMI is + * retained in the Recycle Bin for the specified retention period. For more information, + * see Recycle + * Bin in the Amazon Elastic Compute Cloud User Guide.
+ * + *When you deregister an AMI, it doesn't affect any instances that you've already + * launched from the AMI. You'll continue to incur usage costs for those instances until + * you terminate them.
*When you deregister an Amazon EBS-backed AMI, it doesn't affect the snapshot that was * created for the root volume of the instance during the AMI creation process. When you * deregister an instance store-backed AMI, it doesn't affect the files that you uploaded diff --git a/clients/client-ec2/src/commands/DisableFastLaunchCommand.ts b/clients/client-ec2/src/commands/DisableFastLaunchCommand.ts index 181a4bced7cbf..033de9ee1c1f2 100644 --- a/clients/client-ec2/src/commands/DisableFastLaunchCommand.ts +++ b/clients/client-ec2/src/commands/DisableFastLaunchCommand.ts @@ -25,6 +25,9 @@ export interface DisableFastLaunchCommandOutput extends DisableFastLaunchResult, *
Discontinue faster launching for a Windows AMI, and clean up existing pre-provisioned snapshots. * When you disable faster launching, the AMI uses the standard launch process for each * instance. All pre-provisioned snapshots must be removed before you can enable faster launching again.
+ *To change these settings, you must own the AMI.
+ *To change these settings, you must own the AMI.
+ *Lists one or more AMIs that are currently in the Recycle Bin. For more information, + * see Recycle + * Bin in the Amazon Elastic Compute Cloud User Guide.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { EC2Client, ListImagesInRecycleBinCommand } from "@aws-sdk/client-ec2"; // ES Modules import + * // const { EC2Client, ListImagesInRecycleBinCommand } = require("@aws-sdk/client-ec2"); // CommonJS import + * const client = new EC2Client(config); + * const command = new ListImagesInRecycleBinCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListImagesInRecycleBinCommandInput} for command's `input` shape. + * @see {@link ListImagesInRecycleBinCommandOutput} for command's `response` shape. + * @see {@link EC2ClientResolvedConfig | config} for EC2Client's `config` shape. + * + */ +export class ListImagesInRecycleBinCommand extends $Command< + ListImagesInRecycleBinCommandInput, + ListImagesInRecycleBinCommandOutput, + EC2ClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListImagesInRecycleBinCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackRestores an AMI from the Recycle Bin. For more information, see Recycle Bin in the Amazon Elastic Compute Cloud User Guide.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { EC2Client, RestoreImageFromRecycleBinCommand } from "@aws-sdk/client-ec2"; // ES Modules import + * // const { EC2Client, RestoreImageFromRecycleBinCommand } = require("@aws-sdk/client-ec2"); // CommonJS import + * const client = new EC2Client(config); + * const command = new RestoreImageFromRecycleBinCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link RestoreImageFromRecycleBinCommandInput} for command's `input` shape. + * @see {@link RestoreImageFromRecycleBinCommandOutput} for command's `response` shape. + * @see {@link EC2ClientResolvedConfig | config} for EC2Client's `config` shape. + * + */ +export class RestoreImageFromRecycleBinCommand extends $Command< + RestoreImageFromRecycleBinCommandInput, + RestoreImageFromRecycleBinCommandOutput, + EC2ClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: RestoreImageFromRecycleBinCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackOptions for enabling a customizable text banner that will be displayed on + * Amazon Web Services provided clients when a VPN session is established.
+ */ export interface ClientLoginBannerOptions { + /** + *Enable or disable a customizable text banner that will be displayed on + * Amazon Web Services provided clients when a VPN session is established.
+ *Valid values: true | false
+ *
Default value: false
+ *
Customizable text that will be displayed in a banner on Amazon Web Services provided + * clients when a VPN session is established. UTF-8 encoded characters only. Maximum of + * 1400 characters.
+ */ BannerText?: string; } @@ -7742,7 +7760,19 @@ export interface CreateClientVpnEndpointRequest { */ ClientConnectOptions?: ClientConnectOptions; + /** + *The maximum VPN session duration time in hours.
+ *Valid values: 8 | 10 | 12 | 24
+ *
Default value: 24
+ *
Options for enabling a customizable text banner that will be displayed on + * Amazon Web Services provided clients when a VPN session is established.
+ */ ClientLoginBannerOptions?: ClientLoginBannerOptions; } diff --git a/clients/client-ec2/src/models/models_2.ts b/clients/client-ec2/src/models/models_2.ts index 997b9a0c21ea4..83379e2a05ebf 100644 --- a/clients/client-ec2/src/models/models_2.ts +++ b/clients/client-ec2/src/models/models_2.ts @@ -7907,8 +7907,23 @@ export namespace ClientConnectResponseOptions { }); } +/** + *Current state of options for customizable text banner that will be displayed on + * Amazon Web Services provided clients when a VPN session is established.
+ */ export interface ClientLoginBannerResponseOptions { + /** + *Current state of text banner feature.
+ *Valid values: true | false
+ *
Customizable text that will be displayed in a banner on Amazon Web Services provided + * clients when a VPN session is established. UTF-8 encoded + * characters only. Maximum of 1400 characters.
+ */ BannerText?: string; } @@ -8065,7 +8080,19 @@ export interface ClientVpnEndpoint { */ ClientConnectOptions?: ClientConnectResponseOptions; + /** + *The maximum VPN session duration time in hours.
+ *Valid values: 8 | 10 | 12 | 24
+ *
Default value: 24
+ *
Options for enabling a customizable text banner that will be displayed on Amazon Web Services provided clients when a VPN session is + * established.
+ */ ClientLoginBannerOptions?: ClientLoginBannerResponseOptions; } diff --git a/clients/client-ec2/src/models/models_4.ts b/clients/client-ec2/src/models/models_4.ts index b4f9a7d761a15..5a3fdab8b7f6b 100644 --- a/clients/client-ec2/src/models/models_4.ts +++ b/clients/client-ec2/src/models/models_4.ts @@ -9450,7 +9450,7 @@ export interface EnableFastLaunchRequest { LaunchTemplate?: FastLaunchLaunchTemplateSpecificationRequest; /** - *The maximum number of parallel instances to launch for creating resources.
+ *The maximum number of parallel instances to launch for creating resources. Value must be 6
or greater.
The IDs of the AMIs to list. Omit this parameter to list all of the AMIs that + * are in the Recycle Bin. You can specify up to 20 IDs in a single request.
+ */ + ImageIds?: string[]; + + /** + *The token for the next page of results.
+ */ + NextToken?: string; + + /** + *The maximum number of results to return with a single call.
+ * To retrieve the remaining results, make another call with the returned nextToken
value.
If you do not specify a value for MaxResults, the request + * returns 1,000 items per page by default. For more information, see + * + * Pagination.
+ */ + MaxResults?: number; + + /** + *Checks whether you have the required permissions for the action, without actually making the request,
+ * and provides an error response. If you have the required permissions, the error response is
+ * DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
Information about an AMI that is currently in the Recycle Bin.
+ */ +export interface ImageRecycleBinInfo { + /** + *The ID of the AMI.
+ */ + ImageId?: string; + + /** + *The name of the AMI.
+ */ + Name?: string; + + /** + *The description of the AMI.
+ */ + Description?: string; + + /** + *The date and time when the AMI entered the Recycle Bin.
+ */ + RecycleBinEnterTime?: Date; + + /** + *The date and time when the AMI is to be permanently deleted from the Recycle Bin.
+ */ + RecycleBinExitTime?: Date; +} + +export namespace ImageRecycleBinInfo { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ImageRecycleBinInfo): any => ({ + ...obj, + }); +} + +export interface ListImagesInRecycleBinResult { + /** + *Information about the AMIs.
+ */ + Images?: ImageRecycleBinInfo[]; + + /** + *The token to use to retrieve the next page of results. This value is null
when there are no more results to return.
The maximum number of results to return with a single call. @@ -3524,7 +3623,19 @@ export interface ModifyClientVpnEndpointRequest { */ ClientConnectOptions?: ClientConnectOptions; + /** + *
The maximum VPN session duration time in hours.
+ *Valid values: 8 | 10 | 12 | 24
+ *
Default value: 24
+ *
Options for enabling a customizable text banner that will be displayed on + * Amazon Web Services provided clients when a VPN session is established.
+ */ ClientLoginBannerOptions?: ClientLoginBannerOptions; } @@ -9656,96 +9767,3 @@ export namespace ResetImageAttributeRequest { ...obj, }); } - -export interface ResetInstanceAttributeRequest { - /** - *The attribute to reset.
- *You can only reset the following attributes: kernel
|
- * ramdisk
| sourceDestCheck
. To change an instance
- * attribute, use ModifyInstanceAttribute.
Checks whether you have the required permissions for the action, without actually making the request,
- * and provides an error response. If you have the required permissions, the error response is DryRunOperation
.
- * Otherwise, it is UnauthorizedOperation
.
The ID of the instance.
- */ - InstanceId: string | undefined; -} - -export namespace ResetInstanceAttributeRequest { - /** - * @internal - */ - export const filterSensitiveLog = (obj: ResetInstanceAttributeRequest): any => ({ - ...obj, - }); -} - -/** - *Contains the parameters for ResetNetworkInterfaceAttribute.
- */ -export interface ResetNetworkInterfaceAttributeRequest { - /** - *Checks whether you have the required permissions for the action, without actually making the request,
- * and provides an error response. If you have the required permissions, the error response is DryRunOperation
.
- * Otherwise, it is UnauthorizedOperation
.
The ID of the network interface.
- */ - NetworkInterfaceId: string | undefined; - - /** - *The source/destination checking attribute. Resets the value to true
.
The attribute to reset. Currently, only the attribute for permission to create volumes can - * be reset.
- */ - Attribute: SnapshotAttributeName | string | undefined; - - /** - *The ID of the snapshot.
- */ - SnapshotId: string | undefined; - - /** - *Checks whether you have the required permissions for the action, without actually making the request,
- * and provides an error response. If you have the required permissions, the error response is DryRunOperation
.
- * Otherwise, it is UnauthorizedOperation
.
The attribute to reset.
+ *You can only reset the following attributes: kernel
|
+ * ramdisk
| sourceDestCheck
. To change an instance
+ * attribute, use ModifyInstanceAttribute.
Checks whether you have the required permissions for the action, without actually making the request,
+ * and provides an error response. If you have the required permissions, the error response is DryRunOperation
.
+ * Otherwise, it is UnauthorizedOperation
.
The ID of the instance.
+ */ + InstanceId: string | undefined; +} + +export namespace ResetInstanceAttributeRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ResetInstanceAttributeRequest): any => ({ + ...obj, + }); +} + +/** + *Contains the parameters for ResetNetworkInterfaceAttribute.
+ */ +export interface ResetNetworkInterfaceAttributeRequest { + /** + *Checks whether you have the required permissions for the action, without actually making the request,
+ * and provides an error response. If you have the required permissions, the error response is DryRunOperation
.
+ * Otherwise, it is UnauthorizedOperation
.
The ID of the network interface.
+ */ + NetworkInterfaceId: string | undefined; + + /** + *The source/destination checking attribute. Resets the value to true
.
The attribute to reset. Currently, only the attribute for permission to create volumes can + * be reset.
+ */ + Attribute: SnapshotAttributeName | string | undefined; + + /** + *The ID of the snapshot.
+ */ + SnapshotId: string | undefined; + + /** + *Checks whether you have the required permissions for the action, without actually making the request,
+ * and provides an error response. If you have the required permissions, the error response is DryRunOperation
.
+ * Otherwise, it is UnauthorizedOperation
.
Checks whether you have the required permissions for the action, without actually making the request, @@ -79,6 +177,45 @@ export namespace RestoreAddressToClassicResult { }); } +export interface RestoreImageFromRecycleBinRequest { + /** + *
The ID of the AMI to restore.
+ */ + ImageId: string | undefined; + + /** + *Checks whether you have the required permissions for the action, without actually making the request,
+ * and provides an error response. If you have the required permissions, the error response is
+ * DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
Returns true
if the request succeeds; otherwise, it returns an error.
Checks whether you have the required permissions for the action, without actually making the request,
diff --git a/clients/client-ec2/src/pagination/ListImagesInRecycleBinPaginator.ts b/clients/client-ec2/src/pagination/ListImagesInRecycleBinPaginator.ts
new file mode 100644
index 0000000000000..3f2cb556ad7c4
--- /dev/null
+++ b/clients/client-ec2/src/pagination/ListImagesInRecycleBinPaginator.ts
@@ -0,0 +1,59 @@
+import { Paginator } from "@aws-sdk/types";
+
+import {
+ ListImagesInRecycleBinCommand,
+ ListImagesInRecycleBinCommandInput,
+ ListImagesInRecycleBinCommandOutput,
+} from "../commands/ListImagesInRecycleBinCommand";
+import { EC2 } from "../EC2";
+import { EC2Client } from "../EC2Client";
+import { EC2PaginationConfiguration } from "./Interfaces";
+
+/**
+ * @private
+ */
+const makePagedClientRequest = async (
+ client: EC2Client,
+ input: ListImagesInRecycleBinCommandInput,
+ ...args: any
+): Promiseddd
are:
redis3.2
|
* redis4.0
|
* redis5.0
|
- * redis6.0
|
- * redis6.2
+ * redis6.x
*
*/
CacheParameterGroupFamily: string | undefined;
@@ -3634,7 +3633,7 @@ export interface CacheParameterGroup {
* redis3.2
|
* redis4.0
|
* redis5.0
|
- * redis6.0
|
+ * redis6.x
|
*
*/
CacheParameterGroupFamily?: string;
diff --git a/clients/client-elasticsearch-service/src/ElasticsearchService.ts b/clients/client-elasticsearch-service/src/ElasticsearchService.ts
index 5afdebbad404b..38a340529d806 100644
--- a/clients/client-elasticsearch-service/src/ElasticsearchService.ts
+++ b/clients/client-elasticsearch-service/src/ElasticsearchService.ts
@@ -61,6 +61,11 @@ import {
DescribeDomainAutoTunesCommandInput,
DescribeDomainAutoTunesCommandOutput,
} from "./commands/DescribeDomainAutoTunesCommand";
+import {
+ DescribeDomainChangeProgressCommand,
+ DescribeDomainChangeProgressCommandInput,
+ DescribeDomainChangeProgressCommandOutput,
+} from "./commands/DescribeDomainChangeProgressCommand";
import {
DescribeElasticsearchDomainCommand,
DescribeElasticsearchDomainCommandInput,
@@ -622,6 +627,39 @@ export class ElasticsearchService extends ElasticsearchServiceClient {
}
}
+ /**
+ * Returns information about the current blue/green deployment happening on a domain, including + * a change ID, status, and progress stages.
+ */ + public describeDomainChangeProgress( + args: DescribeDomainChangeProgressCommandInput, + options?: __HttpHandlerOptions + ): PromiseReturns domain configuration information about the specified Elasticsearch domain, including the domain ID, domain endpoint, and domain ARN.
*/ diff --git a/clients/client-elasticsearch-service/src/ElasticsearchServiceClient.ts b/clients/client-elasticsearch-service/src/ElasticsearchServiceClient.ts index 8bd20d364a4a8..19e5f9cac75f4 100644 --- a/clients/client-elasticsearch-service/src/ElasticsearchServiceClient.ts +++ b/clients/client-elasticsearch-service/src/ElasticsearchServiceClient.ts @@ -90,6 +90,10 @@ import { DescribeDomainAutoTunesCommandInput, DescribeDomainAutoTunesCommandOutput, } from "./commands/DescribeDomainAutoTunesCommand"; +import { + DescribeDomainChangeProgressCommandInput, + DescribeDomainChangeProgressCommandOutput, +} from "./commands/DescribeDomainChangeProgressCommand"; import { DescribeElasticsearchDomainCommandInput, DescribeElasticsearchDomainCommandOutput, @@ -190,6 +194,7 @@ export type ServiceInputTypes = | DeleteOutboundCrossClusterSearchConnectionCommandInput | DeletePackageCommandInput | DescribeDomainAutoTunesCommandInput + | DescribeDomainChangeProgressCommandInput | DescribeElasticsearchDomainCommandInput | DescribeElasticsearchDomainConfigCommandInput | DescribeElasticsearchDomainsCommandInput @@ -232,6 +237,7 @@ export type ServiceOutputTypes = | DeleteOutboundCrossClusterSearchConnectionCommandOutput | DeletePackageCommandOutput | DescribeDomainAutoTunesCommandOutput + | DescribeDomainChangeProgressCommandOutput | DescribeElasticsearchDomainCommandOutput | DescribeElasticsearchDomainConfigCommandOutput | DescribeElasticsearchDomainsCommandOutput diff --git a/clients/client-elasticsearch-service/src/commands/DescribeDomainChangeProgressCommand.ts b/clients/client-elasticsearch-service/src/commands/DescribeDomainChangeProgressCommand.ts new file mode 100644 index 0000000000000..4bb75e9b76fd2 --- /dev/null +++ b/clients/client-elasticsearch-service/src/commands/DescribeDomainChangeProgressCommand.ts @@ -0,0 +1,105 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from 
"@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { + ElasticsearchServiceClientResolvedConfig, + ServiceInputTypes, + ServiceOutputTypes, +} from "../ElasticsearchServiceClient"; +import { DescribeDomainChangeProgressRequest, DescribeDomainChangeProgressResponse } from "../models/models_0"; +import { + deserializeAws_restJson1DescribeDomainChangeProgressCommand, + serializeAws_restJson1DescribeDomainChangeProgressCommand, +} from "../protocols/Aws_restJson1"; + +export interface DescribeDomainChangeProgressCommandInput extends DescribeDomainChangeProgressRequest {} +export interface DescribeDomainChangeProgressCommandOutput + extends DescribeDomainChangeProgressResponse, + __MetadataBearer {} + +/** + *Returns information about the current blue/green deployment happening on a domain, including + * a change ID, status, and progress stages.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { ElasticsearchServiceClient, DescribeDomainChangeProgressCommand } from "@aws-sdk/client-elasticsearch-service"; // ES Modules import + * // const { ElasticsearchServiceClient, DescribeDomainChangeProgressCommand } = require("@aws-sdk/client-elasticsearch-service"); // CommonJS import + * const client = new ElasticsearchServiceClient(config); + * const command = new DescribeDomainChangeProgressCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DescribeDomainChangeProgressCommandInput} for command's `input` shape. + * @see {@link DescribeDomainChangeProgressCommandOutput} for command's `response` shape. + * @see {@link ElasticsearchServiceClientResolvedConfig | config} for ElasticsearchServiceClient's `config` shape. + * + */ +export class DescribeDomainChangeProgressCommand extends $Command< + DescribeDomainChangeProgressCommandInput, + DescribeDomainChangeProgressCommandOutput, + ElasticsearchServiceClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DescribeDomainChangeProgressCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackSpecifies change details of the domain configuration change.
+ */ +export interface ChangeProgressDetails { + /** + *The unique change identifier associated with a specific domain configuration change.
+ */ + ChangeId?: string; + + /** + *Contains an optional message associated with the domain configuration change.
+ */ + Message?: string; +} + +export namespace ChangeProgressDetails { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ChangeProgressDetails): any => ({ + ...obj, + }); +} + /** *Options to specify the subnets and security groups for VPC endpoint. For more information, see VPC Endpoints for Amazon Elasticsearch Service Domains.
*/ @@ -1688,6 +1712,11 @@ export interface ElasticsearchDomainStatus { *The current status of the Elasticsearch domain's Auto-Tune options.
*/ AutoTuneOptions?: AutoTuneOptionsOutput; + + /** + *Specifies change details of the domain configuration change.
+ */ + ChangeProgressDetails?: ChangeProgressDetails; } export namespace ElasticsearchDomainStatus { @@ -2353,6 +2382,146 @@ export namespace DescribeDomainAutoTunesResponse { }); } +/** + *Container for the parameters to the DescribeDomainChangeProgress
operation. Specifies the
+ * domain name and optional change specific identity for which you want progress information.
+ *
The domain you want to get the progress information about.
+ */ + DomainName: string | undefined; + + /** + *The specific change ID for which you want to get progress information. This is an optional parameter. + * If omitted, the service returns information about the most recent configuration change. + *
+ */ + ChangeId?: string; +} + +export namespace DescribeDomainChangeProgressRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DescribeDomainChangeProgressRequest): any => ({ + ...obj, + }); +} + +/** + *A progress stage details of a specific domain configuration change.
+ */ +export interface ChangeProgressStage { + /** + *The name of the specific progress stage.
+ */ + Name?: string; + + /** + *The overall status of a specific progress stage.
+ */ + Status?: string; + + /** + *The description of the progress stage.
+ */ + Description?: string; + + /** + *The last updated timestamp of the progress stage.
+ */ + LastUpdated?: Date; +} + +export namespace ChangeProgressStage { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ChangeProgressStage): any => ({ + ...obj, + }); +} + +export enum OverallChangeStatus { + COMPLETED = "COMPLETED", + FAILED = "FAILED", + PENDING = "PENDING", + PROCESSING = "PROCESSING", +} + +/** + *The progress details of a specific domain configuration change.
+ */ +export interface ChangeProgressStatusDetails { + /** + *The unique change identifier associated with a specific domain configuration change.
+ */ + ChangeId?: string; + + /** + *The time at which the configuration change is made on the domain.
+ */ + StartTime?: Date; + + /** + *The overall status of the domain configuration change. This field can take the following values: PENDING
, PROCESSING
, COMPLETED
and FAILED
The list of properties involved in the domain configuration change that are still in pending.
+ */ + PendingProperties?: string[]; + + /** + *The list of properties involved in the domain configuration change that are completed.
+ */ + CompletedProperties?: string[]; + + /** + *The total number of stages required for the configuration change.
+ */ + TotalNumberOfStages?: number; + + /** + *The specific stages that the domain is going through to perform the configuration change.
+ */ + ChangeProgressStages?: ChangeProgressStage[]; +} + +export namespace ChangeProgressStatusDetails { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ChangeProgressStatusDetails): any => ({ + ...obj, + }); +} + +/** + *The result of a DescribeDomainChangeProgress
request. Contains the progress information of
+ * the requested domain change.
+ *
Progress information for the configuration change that is requested in the DescribeDomainChangeProgress
request.
+ *
Container for the parameters to the DescribeElasticsearchDomain
operation.
Specifies AutoTuneOptions
for the domain.
Specifies change details of the domain configuration change.
+ */ + ChangeProgressDetails?: ChangeProgressDetails; } export namespace ElasticsearchDomainConfig { @@ -5051,6 +5225,11 @@ export interface UpgradeElasticsearchDomainResponse { * */ PerformCheckOnly?: boolean; + + /** + *Specifies change details of the domain configuration change.
+ */ + ChangeProgressDetails?: ChangeProgressDetails; } export namespace UpgradeElasticsearchDomainResponse { diff --git a/clients/client-elasticsearch-service/src/protocols/Aws_restJson1.ts b/clients/client-elasticsearch-service/src/protocols/Aws_restJson1.ts index 4fa51aad6b359..f901d2b8d814f 100644 --- a/clients/client-elasticsearch-service/src/protocols/Aws_restJson1.ts +++ b/clients/client-elasticsearch-service/src/protocols/Aws_restJson1.ts @@ -59,6 +59,10 @@ import { DescribeDomainAutoTunesCommandInput, DescribeDomainAutoTunesCommandOutput, } from "../commands/DescribeDomainAutoTunesCommand"; +import { + DescribeDomainChangeProgressCommandInput, + DescribeDomainChangeProgressCommandOutput, +} from "../commands/DescribeDomainChangeProgressCommand"; import { DescribeElasticsearchDomainCommandInput, DescribeElasticsearchDomainCommandOutput, @@ -160,6 +164,9 @@ import { AutoTuneOptionsStatus, AutoTuneStatus, BaseException, + ChangeProgressDetails, + ChangeProgressStage, + ChangeProgressStatusDetails, CognitoOptions, CognitoOptionsStatus, ColdStorageOptions, @@ -676,6 +683,40 @@ export const serializeAws_restJson1DescribeDomainAutoTunesCommand = async ( }); }; +export const serializeAws_restJson1DescribeDomainChangeProgressCommand = async ( + input: DescribeDomainChangeProgressCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + + "/2015-01-01/es/domain/{DomainName}/progress"; + if (input.DomainName !== undefined) { + const labelValue: string = input.DomainName; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: DomainName."); + } + resolvedPath = resolvedPath.replace("{DomainName}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: DomainName."); + } + const query: any = { + ...(input.ChangeId !== undefined && { changeid: input.ChangeId }), + }; + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "GET", + headers, + path: resolvedPath, + query, + body, + }); +}; + export const serializeAws_restJson1DescribeElasticsearchDomainCommand = async ( input: DescribeElasticsearchDomainCommandInput, context: __SerdeContext @@ -2632,6 +2673,88 @@ const deserializeAws_restJson1DescribeDomainAutoTunesCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; +export const deserializeAws_restJson1DescribeDomainChangeProgressCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): PromiseYou can only add steps to a cluster that is in one of the following states: STARTING, * BOOTSTRAPPING, RUNNING, or WAITING.
+ *The string values passed into HadoopJarStep
object cannot exceed a total of 10240 characters.
Creates or updates an auto-termination policy for an Amazon EMR cluster. An auto-termination policy defines the amount of idle time in seconds after which a cluster automatically terminates. For alternative cluster termination options, see Control cluster termination.
+ *Auto-termination is supported in Amazon EMR versions 5.30.0 and 6.1.0 and later. For more information, see Using an auto-termination policy.
+ *Creates or updates an auto-termination policy for an Amazon EMR cluster. An auto-termination policy defines the amount of idle time in seconds after which a cluster automatically terminates. For alternative cluster termination options, see Control cluster termination.
*/ public putAutoTerminationPolicy( args: PutAutoTerminationPolicyCommandInput, @@ -1804,7 +1810,12 @@ export class EMR extends EMRClient { } /** - *Sets the Cluster$VisibleToAllUsers value for an EMR cluster. When The SetVisibleToAllUsers parameter is no longer supported. Your cluster may be visible to all users in your account.
+ * To restrict cluster access using an IAM policy, see Identity and Access Management for EMR.
+ * true
, IAM principals in the
+ *
Sets the Cluster$VisibleToAllUsers value for an EMR cluster. When true
, IAM principals in the
* Amazon Web Services account can perform EMR cluster actions that their IAM policies allow. When false
, only the IAM principal that created the cluster and the Amazon Web Services account root user can perform EMR actions on the cluster, regardless of IAM permissions policies attached to other IAM principals.
This action works on running clusters. When you create a cluster, use the RunJobFlowInput$VisibleToAllUsers parameter.
*For more information, see Understanding the EMR Cluster VisibleToAllUsers Setting in the Amazon EMRManagement Guide.
diff --git a/clients/client-emr/src/commands/AddJobFlowStepsCommand.ts b/clients/client-emr/src/commands/AddJobFlowStepsCommand.ts index 39e2213acc464..e5cca00e0c1b7 100644 --- a/clients/client-emr/src/commands/AddJobFlowStepsCommand.ts +++ b/clients/client-emr/src/commands/AddJobFlowStepsCommand.ts @@ -39,6 +39,9 @@ export interface AddJobFlowStepsCommandOutput extends AddJobFlowStepsOutput, __M * step was running must have completed and run successfully. *You can only add steps to a cluster that is in one of the following states: STARTING, * BOOTSTRAPPING, RUNNING, or WAITING.
+ *The string values passed into HadoopJarStep
object cannot exceed a total of 10240 characters.
Creates or updates an auto-termination policy for an Amazon EMR cluster. An auto-termination policy defines the amount of idle time in seconds after which a cluster automatically terminates. For alternative cluster termination options, see Control cluster termination.
+ *Auto-termination is supported in Amazon EMR versions 5.30.0 and 6.1.0 and later. For more information, see Using an auto-termination policy.
+ *Creates or updates an auto-termination policy for an Amazon EMR cluster. An auto-termination policy defines the amount of idle time in seconds after which a cluster automatically terminates. For alternative cluster termination options, see Control cluster termination.
* @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-emr/src/commands/SetVisibleToAllUsersCommand.ts b/clients/client-emr/src/commands/SetVisibleToAllUsersCommand.ts index e949e400b46dc..f7f3f42f58468 100644 --- a/clients/client-emr/src/commands/SetVisibleToAllUsersCommand.ts +++ b/clients/client-emr/src/commands/SetVisibleToAllUsersCommand.ts @@ -22,7 +22,12 @@ export interface SetVisibleToAllUsersCommandInput extends SetVisibleToAllUsersIn export interface SetVisibleToAllUsersCommandOutput extends __MetadataBearer {} /** - *Sets the Cluster$VisibleToAllUsers value for an EMR cluster. When The SetVisibleToAllUsers parameter is no longer supported. Your cluster may be visible to all users in your account.
+ * To restrict cluster access using an IAM policy, see Identity and Access Management for EMR.
+ * true
, IAM principals in the
+ *
Sets the Cluster$VisibleToAllUsers value for an EMR cluster. When true
, IAM principals in the
* Amazon Web Services account can perform EMR cluster actions that their IAM policies allow. When false
, only the IAM principal that created the cluster and the Amazon Web Services account root user can perform EMR actions on the cluster, regardless of IAM permissions policies attached to other IAM principals.
This action works on running clusters. When you create a cluster, use the RunJobFlowInput$VisibleToAllUsers parameter.
*For more information, see Understanding the EMR Cluster VisibleToAllUsers Setting in the Amazon EMRManagement Guide.
diff --git a/clients/client-emr/src/models/models_0.ts b/clients/client-emr/src/models/models_0.ts index bff20e926e26a..67455c6c667bd 100644 --- a/clients/client-emr/src/models/models_0.ts +++ b/clients/client-emr/src/models/models_0.ts @@ -19,7 +19,7 @@ export enum InstanceFleetType { */ export interface VolumeSpecification { /** - *The volume type. Volume types supported are gp2, io1, standard.
+ *The volume type. Volume types supported are gp2, io1, and standard.
*/ VolumeType: string | undefined; @@ -193,6 +193,10 @@ export type SpotProvisioningTimeoutAction = "SWITCH_TO_ON_DEMAND" | "TERMINATE_C * later, excluding 5.0.x versions. Spot Instance allocation strategy is available in * Amazon EMR version 5.12.1 and later. *Spot Instances with a defined duration (also known as Spot blocks) are no longer available to new customers from July 1, 2021. For customers who have previously used the feature, we will continue to support Spot Instances with a defined duration until December 31, 2022. + *
+ *Spot Instances with a defined duration (also known as Spot blocks) are no longer available to new customers from July 1, 2021. For customers who have previously used the feature, we will continue to support Spot Instances with a defined duration until December 31, 2022. + *
+ *A user-defined key, which is the minimum required information for a valid tag. For more - * information, see Tag .
+ * information, see Tag. */ Key?: string; @@ -2199,7 +2207,7 @@ export interface InstanceGroupDetail { InstanceRunningCount: number | undefined; /** - *State of instance group. The following values are deprecated: STARTING, TERMINATED, and + *
State of instance group. The following values are no longer supported: STARTING, TERMINATED, and * FAILED.
*/ State: InstanceGroupState | string | undefined; @@ -2502,7 +2510,7 @@ export interface JobFlowDetail { * Amazon Web Services account can perform EMR cluster actions that their IAM policies allow. Whenfalse
, only the IAM principal that created the cluster and the Amazon Web Services account root user can perform EMR actions, regardless of IAM permissions policies attached to other IAM principals.
* The default value is true
if a value is not provided when creating a
* cluster using the EMR API RunJobFlow command, the CLI
- * create-cluster command, or the Amazon Web Services Management Console. IAM principals that are authorized to perform actions on the cluster can use the SetVisibleToAllUsers action to change the value on a running cluster. For more information, see Understanding the EMR Cluster VisibleToAllUsers Setting in the Amazon EMRManagement Guide.
false
, only the IAM principal that created the cluster and the Amazon Web Services account root user can perform EMR actions, regardless of IAM permissions policies attached to other IAM principals.
* The default value is true
if a value is not provided when creating a
* cluster using the EMR API RunJobFlow command, the CLI
- * create-cluster command, or the Amazon Web Services Management Console. IAM principals that are
- * allowed to perform actions on the cluster can use the SetVisibleToAllUsers action to change the value on a running cluster. For more information, see Understanding the EMR Cluster VisibleToAllUsers Setting in the Amazon EMRManagement Guide.
Applies to Amazon EMR releases 4.0 and later. A case-insensitive list of applications * for Amazon EMR to install and configure when launching the cluster. For a list of - * applications available for each Amazon EMR release version, see the Amazon EMR Release + * applications available for each Amazon EMR release version, see the Amazon EMRRelease * Guide.
*/ Applications?: Application[]; @@ -6855,7 +6862,10 @@ export interface RunJobFlowInput { Configurations?: Configuration[]; /** - *Set this value to true
so that IAM principals in the Amazon Web Services account associated with the cluster can perform EMR actions on the cluster that their IAM policies allow. This value defaults to true
for clusters created using the EMR API or the CLI create-cluster command.
The VisibleToAllUsers parameter is no longer supported. By default, the value is set to true
. Setting it to false
now has no effect.
Set this value to true
so that IAM principals in the Amazon Web Services account associated with the cluster can perform EMR actions on the cluster that their IAM policies allow. This value defaults to true
for clusters created using the EMR API or the CLI create-cluster command.
When set to false
, only the IAM principal that created the cluster and the Amazon Web Services account root user can perform EMR actions for the cluster, regardless of the IAM permissions policies attached to other IAM principals. For more information, see Understanding the EMR Cluster VisibleToAllUsers Setting in the Amazon EMRManagement Guide.
Gets information about the specified resource type.
+ */ + public getTargetResourceType( + args: GetTargetResourceTypeCommandInput, + options?: __HttpHandlerOptions + ): PromiseLists the available FIS actions.
*/ @@ -363,6 +405,38 @@ export class Fis extends FisClient { } } + /** + *Lists the target resource types.
+ */ + public listTargetResourceTypes( + args: ListTargetResourceTypesCommandInput, + options?: __HttpHandlerOptions + ): PromiseStarts running an experiment from the specified experiment template.
*/ diff --git a/clients/client-fis/src/FisClient.ts b/clients/client-fis/src/FisClient.ts index d224b3c729507..ec360623edfb8 100644 --- a/clients/client-fis/src/FisClient.ts +++ b/clients/client-fis/src/FisClient.ts @@ -64,6 +64,10 @@ import { GetExperimentTemplateCommandInput, GetExperimentTemplateCommandOutput, } from "./commands/GetExperimentTemplateCommand"; +import { + GetTargetResourceTypeCommandInput, + GetTargetResourceTypeCommandOutput, +} from "./commands/GetTargetResourceTypeCommand"; import { ListActionsCommandInput, ListActionsCommandOutput } from "./commands/ListActionsCommand"; import { ListExperimentsCommandInput, ListExperimentsCommandOutput } from "./commands/ListExperimentsCommand"; import { @@ -74,6 +78,10 @@ import { ListTagsForResourceCommandInput, ListTagsForResourceCommandOutput, } from "./commands/ListTagsForResourceCommand"; +import { + ListTargetResourceTypesCommandInput, + ListTargetResourceTypesCommandOutput, +} from "./commands/ListTargetResourceTypesCommand"; import { StartExperimentCommandInput, StartExperimentCommandOutput } from "./commands/StartExperimentCommand"; import { StopExperimentCommandInput, StopExperimentCommandOutput } from "./commands/StopExperimentCommand"; import { TagResourceCommandInput, TagResourceCommandOutput } from "./commands/TagResourceCommand"; @@ -90,10 +98,12 @@ export type ServiceInputTypes = | GetActionCommandInput | GetExperimentCommandInput | GetExperimentTemplateCommandInput + | GetTargetResourceTypeCommandInput | ListActionsCommandInput | ListExperimentTemplatesCommandInput | ListExperimentsCommandInput | ListTagsForResourceCommandInput + | ListTargetResourceTypesCommandInput | StartExperimentCommandInput | StopExperimentCommandInput | TagResourceCommandInput @@ -106,10 +116,12 @@ export type ServiceOutputTypes = | GetActionCommandOutput | GetExperimentCommandOutput | GetExperimentTemplateCommandOutput + | GetTargetResourceTypeCommandOutput | ListActionsCommandOutput | 
ListExperimentTemplatesCommandOutput | ListExperimentsCommandOutput | ListTagsForResourceCommandOutput + | ListTargetResourceTypesCommandOutput | StartExperimentCommandOutput | StopExperimentCommandOutput | TagResourceCommandOutput diff --git a/clients/client-fis/src/commands/GetTargetResourceTypeCommand.ts b/clients/client-fis/src/commands/GetTargetResourceTypeCommand.ts new file mode 100644 index 0000000000000..1f6bae7397f13 --- /dev/null +++ b/clients/client-fis/src/commands/GetTargetResourceTypeCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { FisClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../FisClient"; +import { GetTargetResourceTypeRequest, GetTargetResourceTypeResponse } from "../models/models_0"; +import { + deserializeAws_restJson1GetTargetResourceTypeCommand, + serializeAws_restJson1GetTargetResourceTypeCommand, +} from "../protocols/Aws_restJson1"; + +export interface GetTargetResourceTypeCommandInput extends GetTargetResourceTypeRequest {} +export interface GetTargetResourceTypeCommandOutput extends GetTargetResourceTypeResponse, __MetadataBearer {} + +/** + *Gets information about the specified resource type.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { FisClient, GetTargetResourceTypeCommand } from "@aws-sdk/client-fis"; // ES Modules import + * // const { FisClient, GetTargetResourceTypeCommand } = require("@aws-sdk/client-fis"); // CommonJS import + * const client = new FisClient(config); + * const command = new GetTargetResourceTypeCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetTargetResourceTypeCommandInput} for command's `input` shape. + * @see {@link GetTargetResourceTypeCommandOutput} for command's `response` shape. + * @see {@link FisClientResolvedConfig | config} for FisClient's `config` shape. + * + */ +export class GetTargetResourceTypeCommand extends $Command< + GetTargetResourceTypeCommandInput, + GetTargetResourceTypeCommandOutput, + FisClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetTargetResourceTypeCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackLists the target resource types.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { FisClient, ListTargetResourceTypesCommand } from "@aws-sdk/client-fis"; // ES Modules import + * // const { FisClient, ListTargetResourceTypesCommand } = require("@aws-sdk/client-fis"); // CommonJS import + * const client = new FisClient(config); + * const command = new ListTargetResourceTypesCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListTargetResourceTypesCommandInput} for command's `input` shape. + * @see {@link ListTargetResourceTypesCommandOutput} for command's `response` shape. + * @see {@link FisClientResolvedConfig | config} for FisClient's `config` shape. + * + */ +export class ListTargetResourceTypesCommand extends $Command< + ListTargetResourceTypesCommandInput, + ListTargetResourceTypesCommandOutput, + FisClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListTargetResourceTypesCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackThe Amazon Web Services resource type. The resource type must be supported for the specified action.
+ *The resource type. The resource type must be supported for the specified action.
*/ resourceType: string | undefined; @@ -264,6 +264,11 @@ export interface CreateExperimentTemplateTargetInput { * */ selectionMode: string | undefined; + + /** + *The resource type parameters.
+ */ + parameters?: { [key: string]: string }; } export namespace CreateExperimentTemplateTargetInput { @@ -436,6 +441,11 @@ export interface ExperimentTemplateTarget { *Scopes the identified resources to a specific count or percentage.
*/ selectionMode?: string; + + /** + *The resource type parameters.
+ */ + parameters?: { [key: string]: string }; } export namespace ExperimentTemplateTarget { @@ -780,6 +790,11 @@ export interface ExperimentTarget { *Scopes the identified resources to a specific count or percentage.
*/ selectionMode?: string; + + /** + *The resource type parameters.
+ */ + parameters?: { [key: string]: string }; } export namespace ExperimentTarget { @@ -1034,6 +1049,92 @@ export namespace GetExperimentTemplateResponse { }); } +export interface GetTargetResourceTypeRequest { + /** + *The resource type.
+ */ + resourceType: string | undefined; +} + +export namespace GetTargetResourceTypeRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetTargetResourceTypeRequest): any => ({ + ...obj, + }); +} + +/** + *Describes the parameters for a resource type. Use parameters to determine which tasks are + * identified during target resolution.
+ */ +export interface TargetResourceTypeParameter { + /** + *A description of the parameter.
+ */ + description?: string; + + /** + *Indicates whether the parameter is required.
+ */ + required?: boolean; +} + +export namespace TargetResourceTypeParameter { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TargetResourceTypeParameter): any => ({ + ...obj, + }); +} + +/** + *Describes a resource type.
+ */ +export interface TargetResourceType { + /** + *The resource type.
+ */ + resourceType?: string; + + /** + *A description of the resource type.
+ */ + description?: string; + + /** + *The parameters for the resource type.
+ */ + parameters?: { [key: string]: TargetResourceTypeParameter }; +} + +export namespace TargetResourceType { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TargetResourceType): any => ({ + ...obj, + }); +} + +export interface GetTargetResourceTypeResponse { + /** + *Information about the resource type.
+ */ + targetResourceType?: TargetResourceType; +} + +export namespace GetTargetResourceTypeResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetTargetResourceTypeResponse): any => ({ + ...obj, + }); +} + export interface ListActionsRequest { /** *The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken
value.
The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken
value.
The token for the next page of results.
+ */ + nextToken?: string; +} + +export namespace ListTargetResourceTypesRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListTargetResourceTypesRequest): any => ({ + ...obj, + }); +} + +/** + *Describes a resource type.
+ */ +export interface TargetResourceTypeSummary { + /** + *The resource type.
+ */ + resourceType?: string; + + /** + *A description of the resource type.
+ */ + description?: string; +} + +export namespace TargetResourceTypeSummary { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TargetResourceTypeSummary): any => ({ + ...obj, + }); +} + +export interface ListTargetResourceTypesResponse { + /** + *The target resource types.
+ */ + targetResourceTypes?: TargetResourceTypeSummary[]; + + /** + *The token to use to retrieve the next page of results. This value is null
when there are no more results to return.
Unique, case-sensitive identifier that you provide to ensure the idempotency of the request.
@@ -1400,7 +1567,7 @@ export namespace UpdateExperimentTemplateStopConditionInput { */ export interface UpdateExperimentTemplateTargetInput { /** - *The Amazon Web Services resource type. The resource type must be supported for the specified action.
+ *The resource type. The resource type must be supported for the specified action.
*/ resourceType: string | undefined; @@ -1423,6 +1590,11 @@ export interface UpdateExperimentTemplateTargetInput { *Scopes the identified resources to a specific count or percentage.
*/ selectionMode: string | undefined; + + /** + *The resource type parameters.
+ */ + parameters?: { [key: string]: string }; } export namespace UpdateExperimentTemplateTargetInput { diff --git a/clients/client-fis/src/pagination/ListTargetResourceTypesPaginator.ts b/clients/client-fis/src/pagination/ListTargetResourceTypesPaginator.ts new file mode 100644 index 0000000000000..67dda8e1d1db9 --- /dev/null +++ b/clients/client-fis/src/pagination/ListTargetResourceTypesPaginator.ts @@ -0,0 +1,59 @@ +import { Paginator } from "@aws-sdk/types"; + +import { + ListTargetResourceTypesCommand, + ListTargetResourceTypesCommandInput, + ListTargetResourceTypesCommandOutput, +} from "../commands/ListTargetResourceTypesCommand"; +import { Fis } from "../Fis"; +import { FisClient } from "../FisClient"; +import { FisPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: FisClient, + input: ListTargetResourceTypesCommandInput, + ...args: any +): PromiseThe data format of the schema definition. Currently AVRO
and JSON
are supported.
The data format of the schema definition. Currently AVRO
, JSON
and PROTOBUF
are supported.
The data format of the schema definition. Currently AVRO
and JSON
are supported.
The data format of the schema definition. Currently AVRO
, JSON
and PROTOBUF
are supported.
The data format of the schema definition. Currently AVRO
and JSON
are supported.
The data format of the schema definition. Currently AVRO
, JSON
and PROTOBUF
are supported.
The data format of the schema definition. Currently AVRO
and JSON
are supported.
The data format of the schema definition. Currently AVRO
, JSON
and PROTOBUF
are supported.
The data format of the schema definition. Currently only AVRO
and JSON
are supported.
The data format of the schema definition. Currently AVRO
, JSON
and PROTOBUF
are supported.
The data format of the schema definition. Currently AVRO
and JSON
are supported.
The data format of the schema definition. Currently AVRO
, JSON
and PROTOBUF
are supported.
The transaction at which to do the write.
*/ - TransactionId: string | undefined; + TransactionId?: string; /** *A list of WriteOperation
objects that define an object to add to or delete from the manifest for a governed table.
This reference provides descriptions of the low-level AWS Marketplace Metering -Service API.
+This reference provides descriptions of the low-level AWS Marketplace Metering Service +API.
AWS Marketplace sellers can use this API to submit usage data for custom usage dimensions.
-For information on the permissions you need to use this API, see -AWS Marketing metering and entitlement API permissions in the AWS Marketplace Seller Guide. +
For information on the permissions you need to use this API, see AWS Marketplace metering and entitlement API permissions in the +AWS Marketplace Seller Guide.
Submitting Metering Records @@ -22,15 +22,15 @@ dimensions.
-MeterUsage- Submits the metering record for a Marketplace -product. MeterUsage is called from an EC2 instance or a container running on EKS -or ECS.
+MeterUsage - Submits the metering record for an AWS +Marketplace product.MeterUsage
is called from an EC2 instance or a
+container running on EKS or ECS.
-BatchMeterUsage- Submits the metering record for a set of -customers. BatchMeterUsage is called from a software-as-a-service (SaaS) -application.
+BatchMeterUsage - Submits the metering record for a set of +customers.BatchMeterUsage
is called from a software-as-a-service
+(SaaS) application.
@@ -39,11 +39,15 @@ application.
-ResolveCustomer- Called by a SaaS application during the +ResolveCustomer - Called by a SaaS application during the registration process. When a buyer visits your website during the registration process, the buyer submits a Registration Token through the browser. The -Registration Token is resolved through this API to obtain a CustomerIdentifier -and Product Code.
+Registration Token is resolved through this API to obtain a +CustomerIdentifier
+
+along with the CustomerAWSAccountId
and
+ProductCode
.
+
@@ -51,20 +55,23 @@ and Product Code.
Paid container software products sold through AWS Marketplace must -integrate with the AWS Marketplace Metering Service and call the RegisterUsage -operation for software entitlement and metering. Free and BYOL products for -Amazon ECS or Amazon EKS aren't required to call RegisterUsage, but you can do -so if you want to receive usage data in your seller reports. For more -information on using the RegisterUsage operation, see Container-Based Products.
+Paid container software products sold through AWS Marketplace must integrate
+with the AWS Marketplace Metering Service and call the
+RegisterUsage
operation for software entitlement and metering.
+Free and BYOL products for Amazon ECS or Amazon EKS aren't required to call
+RegisterUsage
, but you can do so if you want to receive usage
+data in your seller reports. For more information on using the
+RegisterUsage
operation, see Container-Based Products.
BatchMeterUsage API calls are captured by AWS CloudTrail. You can use Cloudtrail to -verify that the SaaS metering records that you sent are accurate by searching for -records with the eventName of BatchMeterUsage. You can also use CloudTrail to audit -records over time. For more information, see the -AWS CloudTrail User Guide -.
+
+BatchMeterUsage
API calls are captured by AWS CloudTrail. You can use
+Cloudtrail to verify that the SaaS metering records that you sent are accurate by
+searching for records with the eventName
of BatchMeterUsage
.
+You can also use CloudTrail to audit records over time. For more information, see the
+
+AWS CloudTrail User Guide.
+
This reference provides descriptions of the low-level AWS Marketplace Metering - * Service API.
+ *This reference provides descriptions of the low-level AWS Marketplace Metering Service + * API.
*AWS Marketplace sellers can use this API to submit usage data for custom usage * dimensions.
- *For information on the permissions you need to use this API, see - * AWS Marketing metering and entitlement API permissions in the AWS Marketplace Seller Guide. - *
+ *For information on the permissions you need to use this API, see AWS Marketplace metering and entitlement API permissions in the + * AWS Marketplace Seller Guide. + *
** Submitting Metering Records *
*- * MeterUsage- Submits the metering record for a Marketplace - * product. MeterUsage is called from an EC2 instance or a container running on EKS - * or ECS.
+ * MeterUsage - Submits the metering record for an AWS + * Marketplace product.MeterUsage
is called from an EC2 instance or a
+ * container running on EKS or ECS.
* - * BatchMeterUsage- Submits the metering record for a set of - * customers. BatchMeterUsage is called from a software-as-a-service (SaaS) - * application.
+ * BatchMeterUsage - Submits the metering record for a set of + * customers.BatchMeterUsage
is called from a software-as-a-service
+ * (SaaS) application.
* @@ -50,11 +50,14 @@ import { MarketplaceMeteringClient } from "./MarketplaceMeteringClient"; *
- * ResolveCustomer- Called by a SaaS application during the + * ResolveCustomer - Called by a SaaS application during the * registration process. When a buyer visits your website during the registration * process, the buyer submits a Registration Token through the browser. The - * Registration Token is resolved through this API to obtain a CustomerIdentifier - * and Product Code.
+ * Registration Token is resolved through this API to obtain a + *CustomerIdentifier
+ *
+ * along with the CustomerAWSAccountId
and
+ * ProductCode
.
* @@ -62,34 +65,53 @@ import { MarketplaceMeteringClient } from "./MarketplaceMeteringClient"; *
*Paid container software products sold through AWS Marketplace must - * integrate with the AWS Marketplace Metering Service and call the RegisterUsage - * operation for software entitlement and metering. Free and BYOL products for - * Amazon ECS or Amazon EKS aren't required to call RegisterUsage, but you can do - * so if you want to receive usage data in your seller reports. For more - * information on using the RegisterUsage operation, see Container-Based Products.
+ *Paid container software products sold through AWS Marketplace must integrate
+ * with the AWS Marketplace Metering Service and call the
+ * RegisterUsage
operation for software entitlement and metering.
+ * Free and BYOL products for Amazon ECS or Amazon EKS aren't required to call
+ * RegisterUsage
, but you can do so if you want to receive usage
+ * data in your seller reports. For more information on using the
+ * RegisterUsage
operation, see Container-Based Products.
BatchMeterUsage API calls are captured by AWS CloudTrail. You can use Cloudtrail to - * verify that the SaaS metering records that you sent are accurate by searching for - * records with the eventName of BatchMeterUsage. You can also use CloudTrail to audit - * records over time. For more information, see the - * AWS CloudTrail User Guide - * .
+ *
+ * BatchMeterUsage
API calls are captured by AWS CloudTrail. You can use
+ * Cloudtrail to verify that the SaaS metering records that you sent are accurate by
+ * searching for records with the eventName
of BatchMeterUsage
.
+ * You can also use CloudTrail to audit records over time. For more information, see the
+ *
+ * AWS CloudTrail User Guide.
+ *
BatchMeterUsage is called from a SaaS application listed on the AWS Marketplace to - * post metering records for a set of customers.
- *For identical requests, the API is idempotent; requests can be retried with the - * same records or a subset of the input records.
- *Every request to BatchMeterUsage is for one product. If you need to meter usage for - * multiple products, you must make multiple calls to BatchMeterUsage.
- *BatchMeterUsage can process up to 25 UsageRecords at a time.
- *A UsageRecord can optionally include multiple usage allocations, to provide customers - * with usagedata split into buckets by tags that you define (or allow the customer to - * define).
- *BatchMeterUsage requests must be less than 1MB in size.
+ *
+ * BatchMeterUsage
is called from a SaaS application listed on AWS
+ * Marketplace to post metering records for a set of customers.
For identical requests, the API is idempotent; requests can be retried with the same + * records or a subset of the input records.
+ *Every request to BatchMeterUsage
is for one product. If you need to meter
+ * usage for multiple products, you must make multiple calls to
+ * BatchMeterUsage
.
Usage records are expected to be submitted as quickly as possible after the event that + * is being recorded, and are not accepted more than 6 hours after the event.
+ *
+ * BatchMeterUsage
can process up to 25 UsageRecords
at a
+ * time.
A UsageRecord
can optionally include multiple usage allocations, to
+ * provide customers with usage data split into buckets by tags that you define (or allow
+ * the customer to define).
+ * BatchMeterUsage
returns a list of UsageRecordResult
objects,
+ * showing the result for each UsageRecord
, as well as a list of
+ * UnprocessedRecords
, indicating errors in the service side that you
+ * should retry.
+ * BatchMeterUsage
requests must be less than 1MB in size.
For an example of using BatchMeterUsage
, see BatchMeterUsage code example in the AWS Marketplace Seller
+ * Guide.
API to emit metering records. For identical requests, the API is idempotent. It - * simply returns the metering record ID.
- *MeterUsage is authenticated on the buyer's AWS account using credentials from the - * EC2 instance, ECS task, or EKS pod.
- *MeterUsage can optionally include multiple usage allocations, to provide customers - * with usage data split into buckets by tags that you define (or allow the customer to - * define).
+ *API to emit metering records. For identical requests, the API is idempotent. It simply + * returns the metering record ID.
+ *
+ * MeterUsage
is authenticated on the buyer's AWS account using credentials
+ * from the EC2 instance, ECS task, or EKS pod.
+ * MeterUsage
can optionally include multiple usage allocations, to provide
+ * customers with usage data split into buckets by tags that you define (or allow the
+ * customer to define).
Usage records are expected to be submitted as quickly as possible after the event that + * is being recorded, and are not accepted more than 6 hours after the event.
*/ public meterUsage(args: MeterUsageCommandInput, options?: __HttpHandlerOptions): PromisePaid container software products sold through AWS Marketplace must integrate with - * the AWS Marketplace Metering Service and call the RegisterUsage operation for software - * entitlement and metering. Free and BYOL products for Amazon ECS or Amazon EKS aren't - * required to call RegisterUsage, but you may choose to do so if you would like to receive - * usage data in your seller reports. The sections below explain the behavior of - * RegisterUsage. RegisterUsage performs two primary functions: metering and - * entitlement.
- * + *Paid container software products sold through AWS Marketplace must integrate with the
+ * AWS Marketplace Metering Service and call the RegisterUsage
operation for
+ * software entitlement and metering. Free and BYOL products for Amazon ECS or Amazon EKS
+ * aren't required to call RegisterUsage
, but you may choose to do so if you
+ * would like to receive usage data in your seller reports. The sections below explain the
+ * behavior of RegisterUsage
. RegisterUsage
performs two primary
+ * functions: metering and entitlement.
- * Entitlement: RegisterUsage allows you to verify that the - * customer running your paid software is subscribed to your product on AWS - * Marketplace, enabling you to guard against unauthorized use. Your container - * image that integrates with RegisterUsage is only required to guard against - * unauthorized use at container startup, as such a - * CustomerNotSubscribedException/PlatformNotSupportedException will only be thrown - * on the initial call to RegisterUsage. Subsequent calls from the same Amazon ECS - * task instance (e.g. task-id) or Amazon EKS pod will not throw a - * CustomerNotSubscribedException, even if the customer unsubscribes while the - * Amazon ECS task or Amazon EKS pod is still running.
+ * Entitlement:RegisterUsage
allows you to
+ * verify that the customer running your paid software is subscribed to your
+ * product on AWS Marketplace, enabling you to guard against unauthorized use. Your
+ * container image that integrates with RegisterUsage
is only required
+ * to guard against unauthorized use at container startup, as such a
+ * CustomerNotSubscribedException
or
+ * PlatformNotSupportedException
will only be thrown on the
+ * initial call to RegisterUsage
. Subsequent calls from the same
+ * Amazon ECS task instance (e.g. task-id) or Amazon EKS pod will not throw a
+ * CustomerNotSubscribedException
, even if the customer
+ * unsubscribes while the Amazon ECS task or Amazon EKS pod is still
+ * running.
* - * Metering: RegisterUsage meters software use per ECS task, - * per hour, or per pod for Amazon EKS with usage prorated to the second. A minimum - * of 1 minute of usage applies to tasks that are short lived. For example, if a - * customer has a 10 node Amazon ECS or Amazon EKS cluster and a service configured - * as a Daemon Set, then Amazon ECS or Amazon EKS will launch a task on all 10 - * cluster nodes and the customer will be charged: (10 * hourly_rate). Metering for - * software use is automatically handled by the AWS Marketplace Metering Control - * Plane -- your software is not required to perform any metering specific actions, - * other than call RegisterUsage once for metering of software use to commence. The - * AWS Marketplace Metering Control Plane will also continue to bill customers for - * running ECS tasks and Amazon EKS pods, regardless of the customers subscription - * state, removing the need for your software to perform entitlement checks at - * runtime.
+ * Metering:RegisterUsage
meters software use
+ * per ECS task, per hour, or per pod for Amazon EKS with usage prorated to the
+ * second. A minimum of 1 minute of usage applies to tasks that are short lived.
+ * For example, if a customer has a 10 node Amazon ECS or Amazon EKS cluster and a
+ * service configured as a Daemon Set, then Amazon ECS or Amazon EKS will launch a
+ * task on all 10 cluster nodes and the customer will be charged: (10 *
+ * hourly_rate). Metering for software use is automatically handled by the AWS
+ * Marketplace Metering Control Plane -- your software is not required to perform
+ * any metering specific actions, other than call RegisterUsage
once
+ * for metering of software use to commence. The AWS Marketplace Metering Control
+ * Plane will also continue to bill customers for running ECS tasks and Amazon EKS
+ * pods, regardless of the customers subscription state, removing the need for your
+ * software to perform entitlement checks at runtime.
* ResolveCustomer is called by a SaaS application during the registration process. - * When a buyer visits your website during the registration process, the buyer submits a - * registration token through their browser. The registration token is resolved through - * this API to obtain a CustomerIdentifier and product code.
+ *
+ * ResolveCustomer
is called by a SaaS application during the registration
+ * process. When a buyer visits your website during the registration process, the buyer
+ * submits a registration token through their browser. The registration token is resolved
+ * through this API to obtain a CustomerIdentifier
+ * along with the
+ * CustomerAWSAccountId
and
+ * ProductCode
.
The API needs to called from the seller account id used to publish the SaaS + * application to successfully resolve the token.
+ *For an example of using ResolveCustomer
, see ResolveCustomer code example in the AWS Marketplace Seller
+ * Guide.
This reference provides descriptions of the low-level AWS Marketplace Metering - * Service API.
+ *This reference provides descriptions of the low-level AWS Marketplace Metering Service + * API.
*AWS Marketplace sellers can use this API to submit usage data for custom usage * dimensions.
- *For information on the permissions you need to use this API, see - * AWS Marketing metering and entitlement API permissions in the AWS Marketplace Seller Guide. - *
+ *For information on the permissions you need to use this API, see AWS Marketplace metering and entitlement API permissions in the + * AWS Marketplace Seller Guide. + *
** Submitting Metering Records *
*- * MeterUsage- Submits the metering record for a Marketplace - * product. MeterUsage is called from an EC2 instance or a container running on EKS - * or ECS.
+ * MeterUsage - Submits the metering record for an AWS + * Marketplace product.MeterUsage
is called from an EC2 instance or a
+ * container running on EKS or ECS.
* - * BatchMeterUsage- Submits the metering record for a set of - * customers. BatchMeterUsage is called from a software-as-a-service (SaaS) - * application.
+ * BatchMeterUsage - Submits the metering record for a set of + * customers.BatchMeterUsage
is called from a software-as-a-service
+ * (SaaS) application.
* @@ -253,11 +253,14 @@ export interface MarketplaceMeteringClientResolvedConfig extends MarketplaceMete *
- * ResolveCustomer- Called by a SaaS application during the + * ResolveCustomer - Called by a SaaS application during the * registration process. When a buyer visits your website during the registration * process, the buyer submits a Registration Token through the browser. The - * Registration Token is resolved through this API to obtain a CustomerIdentifier - * and Product Code.
+ * Registration Token is resolved through this API to obtain a + *CustomerIdentifier
+ *
+ * along with the CustomerAWSAccountId
and
+ * ProductCode
.
* @@ -265,20 +268,23 @@ export interface MarketplaceMeteringClientResolvedConfig extends MarketplaceMete *
*Paid container software products sold through AWS Marketplace must - * integrate with the AWS Marketplace Metering Service and call the RegisterUsage - * operation for software entitlement and metering. Free and BYOL products for - * Amazon ECS or Amazon EKS aren't required to call RegisterUsage, but you can do - * so if you want to receive usage data in your seller reports. For more - * information on using the RegisterUsage operation, see Container-Based Products.
+ *Paid container software products sold through AWS Marketplace must integrate
+ * with the AWS Marketplace Metering Service and call the
+ * RegisterUsage
operation for software entitlement and metering.
+ * Free and BYOL products for Amazon ECS or Amazon EKS aren't required to call
+ * RegisterUsage
, but you can do so if you want to receive usage
+ * data in your seller reports. For more information on using the
+ * RegisterUsage
operation, see Container-Based Products.
BatchMeterUsage API calls are captured by AWS CloudTrail. You can use Cloudtrail to - * verify that the SaaS metering records that you sent are accurate by searching for - * records with the eventName of BatchMeterUsage. You can also use CloudTrail to audit - * records over time. For more information, see the - * AWS CloudTrail User Guide - * .
+ *
+ * BatchMeterUsage
API calls are captured by AWS CloudTrail. You can use
+ * Cloudtrail to verify that the SaaS metering records that you sent are accurate by
+ * searching for records with the eventName
of BatchMeterUsage
.
+ * You can also use CloudTrail to audit records over time. For more information, see the
+ *
+ * AWS CloudTrail User Guide.
+ *
BatchMeterUsage is called from a SaaS application listed on the AWS Marketplace to - * post metering records for a set of customers.
- *For identical requests, the API is idempotent; requests can be retried with the - * same records or a subset of the input records.
- *Every request to BatchMeterUsage is for one product. If you need to meter usage for - * multiple products, you must make multiple calls to BatchMeterUsage.
- *BatchMeterUsage can process up to 25 UsageRecords at a time.
- *A UsageRecord can optionally include multiple usage allocations, to provide customers - * with usagedata split into buckets by tags that you define (or allow the customer to - * define).
- *BatchMeterUsage requests must be less than 1MB in size.
+ *
+ * BatchMeterUsage
is called from a SaaS application listed on AWS
+ * Marketplace to post metering records for a set of customers.
For identical requests, the API is idempotent; requests can be retried with the same + * records or a subset of the input records.
+ *Every request to BatchMeterUsage
is for one product. If you need to meter
+ * usage for multiple products, you must make multiple calls to
+ * BatchMeterUsage
.
Usage records are expected to be submitted as quickly as possible after the event that + * is being recorded, and are not accepted more than 6 hours after the event.
+ *
+ * BatchMeterUsage
can process up to 25 UsageRecords
at a
+ * time.
A UsageRecord
can optionally include multiple usage allocations, to
+ * provide customers with usage data split into buckets by tags that you define (or allow
+ * the customer to define).
+ * BatchMeterUsage
returns a list of UsageRecordResult
objects,
+ * showing the result for each UsageRecord
, as well as a list of
+ * UnprocessedRecords
, indicating errors in the service side that you
+ * should retry.
+ * BatchMeterUsage
requests must be less than 1MB in size.
For an example of using BatchMeterUsage
, see BatchMeterUsage code example in the AWS Marketplace Seller
+ * Guide.
API to emit metering records. For identical requests, the API is idempotent. It - * simply returns the metering record ID.
- *MeterUsage is authenticated on the buyer's AWS account using credentials from the - * EC2 instance, ECS task, or EKS pod.
- *MeterUsage can optionally include multiple usage allocations, to provide customers - * with usage data split into buckets by tags that you define (or allow the customer to - * define).
+ *API to emit metering records. For identical requests, the API is idempotent. It simply + * returns the metering record ID.
+ *
+ * MeterUsage
is authenticated on the buyer's AWS account using credentials
+ * from the EC2 instance, ECS task, or EKS pod.
+ * MeterUsage
can optionally include multiple usage allocations, to provide
+ * customers with usage data split into buckets by tags that you define (or allow the
+ * customer to define).
Usage records are expected to be submitted as quickly as possible after the event that + * is being recorded, and are not accepted more than 6 hours after the event.
* @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-marketplace-metering/src/commands/RegisterUsageCommand.ts b/clients/client-marketplace-metering/src/commands/RegisterUsageCommand.ts index 070dd3d214773..0120bf82c3761 100644 --- a/clients/client-marketplace-metering/src/commands/RegisterUsageCommand.ts +++ b/clients/client-marketplace-metering/src/commands/RegisterUsageCommand.ts @@ -26,43 +26,44 @@ export interface RegisterUsageCommandInput extends RegisterUsageRequest {} export interface RegisterUsageCommandOutput extends RegisterUsageResult, __MetadataBearer {} /** - *Paid container software products sold through AWS Marketplace must integrate with - * the AWS Marketplace Metering Service and call the RegisterUsage operation for software - * entitlement and metering. Free and BYOL products for Amazon ECS or Amazon EKS aren't - * required to call RegisterUsage, but you may choose to do so if you would like to receive - * usage data in your seller reports. The sections below explain the behavior of - * RegisterUsage. RegisterUsage performs two primary functions: metering and - * entitlement.
- * + *Paid container software products sold through AWS Marketplace must integrate with the
+ * AWS Marketplace Metering Service and call the RegisterUsage
operation for
+ * software entitlement and metering. Free and BYOL products for Amazon ECS or Amazon EKS
+ * aren't required to call RegisterUsage
, but you may choose to do so if you
+ * would like to receive usage data in your seller reports. The sections below explain the
+ * behavior of RegisterUsage
. RegisterUsage
performs two primary
+ * functions: metering and entitlement.
- * Entitlement: RegisterUsage allows you to verify that the - * customer running your paid software is subscribed to your product on AWS - * Marketplace, enabling you to guard against unauthorized use. Your container - * image that integrates with RegisterUsage is only required to guard against - * unauthorized use at container startup, as such a - * CustomerNotSubscribedException/PlatformNotSupportedException will only be thrown - * on the initial call to RegisterUsage. Subsequent calls from the same Amazon ECS - * task instance (e.g. task-id) or Amazon EKS pod will not throw a - * CustomerNotSubscribedException, even if the customer unsubscribes while the - * Amazon ECS task or Amazon EKS pod is still running.
+ * Entitlement:RegisterUsage
allows you to
+ * verify that the customer running your paid software is subscribed to your
+ * product on AWS Marketplace, enabling you to guard against unauthorized use. Your
+ * container image that integrates with RegisterUsage
is only required
+ * to guard against unauthorized use at container startup, as such a
+ * CustomerNotSubscribedException
or
+ * PlatformNotSupportedException
will only be thrown on the
+ * initial call to RegisterUsage
. Subsequent calls from the same
+ * Amazon ECS task instance (e.g. task-id) or Amazon EKS pod will not throw a
+ * CustomerNotSubscribedException
, even if the customer
+ * unsubscribes while the Amazon ECS task or Amazon EKS pod is still
+ * running.
* - * Metering: RegisterUsage meters software use per ECS task, - * per hour, or per pod for Amazon EKS with usage prorated to the second. A minimum - * of 1 minute of usage applies to tasks that are short lived. For example, if a - * customer has a 10 node Amazon ECS or Amazon EKS cluster and a service configured - * as a Daemon Set, then Amazon ECS or Amazon EKS will launch a task on all 10 - * cluster nodes and the customer will be charged: (10 * hourly_rate). Metering for - * software use is automatically handled by the AWS Marketplace Metering Control - * Plane -- your software is not required to perform any metering specific actions, - * other than call RegisterUsage once for metering of software use to commence. The - * AWS Marketplace Metering Control Plane will also continue to bill customers for - * running ECS tasks and Amazon EKS pods, regardless of the customers subscription - * state, removing the need for your software to perform entitlement checks at - * runtime.
+ * Metering:RegisterUsage
meters software use
+ * per ECS task, per hour, or per pod for Amazon EKS with usage prorated to the
+ * second. A minimum of 1 minute of usage applies to tasks that are short lived.
+ * For example, if a customer has a 10 node Amazon ECS or Amazon EKS cluster and a
+ * service configured as a Daemon Set, then Amazon ECS or Amazon EKS will launch a
+ * task on all 10 cluster nodes and the customer will be charged: (10 *
+ * hourly_rate). Metering for software use is automatically handled by the AWS
+ * Marketplace Metering Control Plane -- your software is not required to perform
+ * any metering specific actions, other than call RegisterUsage
once
+ * for metering of software use to commence. The AWS Marketplace Metering Control
+ * Plane will also continue to bill customers for running ECS tasks and Amazon EKS
+ * pods, regardless of the customers subscription state, removing the need for your
+ * software to perform entitlement checks at runtime.
* ResolveCustomer is called by a SaaS application during the registration process. - * When a buyer visits your website during the registration process, the buyer submits a - * registration token through their browser. The registration token is resolved through - * this API to obtain a CustomerIdentifier and product code.
+ *
+ * ResolveCustomer
is called by a SaaS application during the registration
+ * process. When a buyer visits your website during the registration process, the buyer
+ * submits a registration token through their browser. The registration token is resolved
+ * through this API to obtain a CustomerIdentifier
+ * along with the
+ * CustomerAWSAccountId
and
+ * ProductCode
.
The API needs to called from the seller account id used to publish the SaaS + * application to successfully resolve the token.
+ *For an example of using ResolveCustomer
, see ResolveCustomer code example in the AWS Marketplace Seller
+ * Guide.
Metadata assigned to an allocation. Each tag is made up of a key and a value.
+ *Metadata assigned to an allocation. Each tag is made up of a key
and a
+ * value
.
One part of a key-value pair that makes up a tag. A key is a label that acts like a - * category for the specific tag values.
+ *One part of a key-value pair that makes up a tag
. A key
is a
+ * label that acts like a category for the specific tag values.
One part of a key-value pair that makes up a tag. A value acts as a descriptor within - * a tag category (key). The value can be empty or null.
+ *One part of a key-value pair that makes up a tag
. A value
+ * acts as a descriptor within a tag category (key). The value can be empty or null.
Usage allocations allow you to split usage into buckets by tags.
- *Each UsageAllocation indicates the usage quantity for a specific set of tags.
+ *Each UsageAllocation
indicates the usage quantity for a specific set of
+ * tags.
The set of tags that define the bucket of usage. For the bucket of items with no - * tags, this parameter can be left out.
+ *The set of tags that define the bucket of usage. For the bucket of items with no tags, + * this parameter can be left out.
*/ Tags?: Tag[]; } @@ -53,29 +55,29 @@ export namespace UsageAllocation { } /** - *A UsageRecord indicates a quantity of usage for a given product, customer, - * dimension and time.
- *Multiple requests with the same UsageRecords as input will be deduplicated to - * prevent double charges.
+ *A UsageRecord
indicates a quantity of usage for a given product,
+ * customer, dimension and time.
Multiple requests with the same UsageRecords
as input will be
+ * de-duplicated to prevent double charges.
Timestamp, in UTC, for which the usage is being reported.
*Your application can meter usage for up to one hour in the past. Make sure the - * timestamp value is not before the start of the software usage.
+ *timestamp
value is not before the start of the software usage.
*/
Timestamp: Date | undefined;
/**
- * The CustomerIdentifier is obtained through the ResolveCustomer operation and - * represents an individual buyer in your application.
+ *The CustomerIdentifier
is obtained through the
+ * ResolveCustomer
operation and represents an individual buyer in your
+ * application.
During the process of registering a product on AWS Marketplace, up to eight - * dimensions are specified. These represent different units of value in your - * application.
+ *During the process of registering a product on AWS Marketplace, dimensions are + * specified. These represent different units of value in your application.
*/ Dimension: string | undefined; @@ -86,8 +88,9 @@ export interface UsageRecord { Quantity?: number; /** - *The set of UsageAllocations to submit. The sum of all UsageAllocation quantities - * must equal the Quantity of the UsageRecord.
+ *The set of UsageAllocations
to submit. The sum of all
+ * UsageAllocation
quantities must equal the Quantity of the
+ * UsageRecord
.
A BatchMeterUsageRequest contains UsageRecords, which indicate quantities of usage - * within your application.
+ *A BatchMeterUsageRequest
contains UsageRecords
, which
+ * indicate quantities of usage within your application.
The set of UsageRecords to submit. BatchMeterUsage accepts up to 25 UsageRecords at - * a time.
+ *The set of UsageRecords
to submit. BatchMeterUsage
accepts
+ * up to 25 UsageRecords
at a time.
Product code is used to uniquely identify a product in AWS Marketplace. The product - * code should be the same as the one used during the publishing of a new - * product.
+ * code should be the same as the one used during the publishing of a new product. */ ProductCode: string | undefined; } @@ -136,41 +138,58 @@ export enum UsageRecordResultStatus { } /** - *A UsageRecordResult indicates the status of a given UsageRecord processed by - * BatchMeterUsage.
+ *A UsageRecordResult
indicates the status of a given
+ * UsageRecord
processed by BatchMeterUsage
.
The UsageRecord that was part of the BatchMeterUsage request.
+ *The UsageRecord
that was part of the BatchMeterUsage
+ * request.
The MeteringRecordId is a unique identifier for this metering event.
+ *The MeteringRecordId
is a unique identifier for this metering
+ * event.
The UsageRecordResult Status indicates the status of an individual UsageRecord - * processed by BatchMeterUsage.
+ *The UsageRecordResult
+ * Status
indicates the status of an individual UsageRecord
+ * processed by BatchMeterUsage
.
- * Success- The UsageRecord was accepted and honored by - * BatchMeterUsage.
+ * Success- TheUsageRecord
was accepted and
+ * honored by BatchMeterUsage
.
* - * CustomerNotSubscribed- The CustomerIdentifier specified is - * not subscribed to your product. The UsageRecord was not honored. Future - * UsageRecords for this customer will fail until the customer subscribes to your - * product.
+ * CustomerNotSubscribed- TheCustomerIdentifier
+ * specified is not able to use your product. The UsageRecord
was not
+ * honored. There are three causes for this result:
+ * The customer identifier is invalid.
+ *The customer identifier provided in the metering record does not have
+ * an active agreement or subscription with this product. Future
+ * UsageRecords
for this customer will fail until the
+ * customer subscribes to your product.
The customer's AWS account was suspended.
+ *- * DuplicateRecord- Indicates that the UsageRecord was invalid - * and not honored. A previously metered UsageRecord had the same customer, - * dimension, and time, but a different quantity.
+ * DuplicateRecord- Indicates that the + *UsageRecord
was invalid and not honored. A previously metered
+ * UsageRecord
had the same customer, dimension, and time, but a
+ * different quantity.
* Contains the UsageRecords processed by BatchMeterUsage and any records that have - * failed due to transient error.
+ *Contains the UsageRecords
processed by BatchMeterUsage
and
+ * any records that have failed due to transient error.
Contains all UsageRecords processed by BatchMeterUsage. These records were either - * honored by AWS Marketplace Metering Service or were invalid.
+ *Contains all UsageRecords
processed by BatchMeterUsage
.
+ * These records were either honored by AWS Marketplace Metering Service or were invalid.
+ * Invalid records should be fixed before being resubmitted.
Contains all UsageRecords that were not processed by BatchMeterUsage. This is a - * list of UsageRecords. You can retry the failed request by making another BatchMeterUsage - * call with this list as input in the BatchMeterUsageRequest.
+ *Contains all UsageRecords
that were not processed by
+ * BatchMeterUsage
. This is a list of UsageRecords
. You can
+ * retry the failed request by making another BatchMeterUsage
call with this
+ * list as input in the BatchMeterUsageRequest
.
You have metered usage for a CustomerIdentifier that does not exist.
+ *You have metered usage for a CustomerIdentifier
that does not
+ * exist.
The usage allocation objects are invalid, or the number of allocations is greater - * than 500 for a single usage record.
+ *The usage allocation objects are invalid, or the number of allocations is greater than + * 500 for a single usage record.
*/ export interface InvalidUsageAllocationsException extends __SmithyException, $MetadataBearer { name: "InvalidUsageAllocationsException"; @@ -272,8 +294,8 @@ export interface InvalidUsageAllocationsException extends __SmithyException, $Me } /** - *The usage dimension does not match one of the UsageDimensions associated with - * products.
+ *The usage dimension does not match one of the UsageDimensions
associated
+ * with products.
The timestamp value passed in the meterUsage() is out of allowed range.
+ *The timestamp
value passed in the UsageRecord
is out of
+ * allowed range.
For BatchMeterUsage
, if any of the records are outside of the allowed
+ * range, the entire batch is not processed. You must remove invalid records and try
+ * again.
A metering record has already been emitted by the same EC2 instance, ECS task, or - * EKS pod for the given {usageDimension, timestamp} with a different - * usageQuantity.
+ *A metering record has already been emitted by the same EC2 instance, ECS task, or EKS
+ * pod for the given {usageDimension
, timestamp
} with a different
+ * usageQuantity
.
Product code is used to uniquely identify a product in AWS Marketplace. The product - * code should be the same as the one used during the publishing of a new - * product.
+ * code should be the same as the one used during the publishing of a new product. */ ProductCode: string | undefined; /** - *Timestamp, in UTC, for which the usage is being reported. Your application can - * meter usage for up to one hour in the past. Make sure the timestamp value is not before - * the start of the software usage.
+ *Timestamp, in UTC, for which the usage is being reported. Your application can meter
+ * usage for up to one hour in the past. Make sure the timestamp
value is not
+ * before the start of the software usage.
Consumption value for the hour. Defaults to 0
if not
- * specified.
Consumption value for the hour. Defaults to 0
if not specified.
Checks whether you have the permissions required for the action, but does not make
- * the request. If you have the permissions, the request returns DryRunOperation;
- * otherwise, it returns UnauthorizedException. Defaults to false
if not
- * specified.
Checks whether you have the permissions required for the action, but does not make the
+ * request. If you have the permissions, the request returns DryRunOperation
;
+ * otherwise, it returns UnauthorizedException
. Defaults to false
+ * if not specified.
The set of UsageAllocations to submit.
- *The sum of all UsageAllocation quantities must equal the - * UsageQuantity of the MeterUsage request, and each UsageAllocation must have a - * unique set of tags (include no tags).
+ *The set of UsageAllocations
to submit.
The sum of all UsageAllocation
quantities must equal the
+ * UsageQuantity
of the MeterUsage
request, and each
+ * UsageAllocation
must have a unique set of tags (include no
+ * tags).
RegisterUsage must be called in the same AWS Region the ECS task was launched in. - * This prevents a container from hardcoding a Region (e.g. withRegion(“us-east-1”) when - * calling RegisterUsage.
+ *
+ * RegisterUsage
must be called in the same AWS Region the ECS task was
+ * launched in. This prevents a container from hardcoding a Region (e.g.
+ * withRegion(“us-east-1”) when calling RegisterUsage
.
Product code is used to uniquely identify a product in AWS Marketplace. The product - * code should be the same as the one used during the publishing of a new - * product.
+ * code should be the same as the one used during the publishing of a new product. */ ProductCode: string | undefined; @@ -444,8 +469,8 @@ export interface RegisterUsageRequest { PublicKeyVersion: number | undefined; /** - *(Optional) To scope down the registration to a specific running software instance - * and guard against replay attacks.
+ *(Optional) To scope down the registration to a specific running software instance and + * guard against replay attacks.
*/ Nonce?: string; } @@ -481,11 +506,11 @@ export namespace RegisterUsageResult { } /** - *The submitted registration token has expired. This can happen if the buyer's - * browser takes too long to redirect to your page, the buyer has resubmitted the - * registration token, or your application has held on to the registration token for too - * long. Your SaaS registration website should redeem this token as soon as it is submitted - * by the buyer's browser.
+ *The submitted registration token has expired. This can happen if the buyer's browser + * takes too long to redirect to your page, the buyer has resubmitted the registration + * token, or your application has held on to the registration token for too long. Your SaaS + * registration website should redeem this token as soon as it is submitted by the buyer's + * browser.
*/ export interface ExpiredTokenException extends __SmithyException, $MetadataBearer { name: "ExpiredTokenException"; @@ -503,13 +528,17 @@ export interface InvalidTokenException extends __SmithyException, $MetadataBeare } /** - *Contains input to the ResolveCustomer operation.
+ *Contains input to the ResolveCustomer
operation.
When a buyer visits your website during the registration process, the buyer submits - * a registration token through the browser. The registration token is resolved to obtain a - * CustomerIdentifier and product code.
+ *When a buyer visits your website during the registration process, the buyer submits a
+ * registration token through the browser. The registration token is resolved to obtain a
+ * CustomerIdentifier
+ * along with the
+ * CustomerAWSAccountId
+ * and
+ * ProductCode
.
The result of the ResolveCustomer operation. Contains the CustomerIdentifier and - * product code.
+ *The result of the ResolveCustomer
operation. Contains the
+ * CustomerIdentifier
+ *
+ * along with the CustomerAWSAccountId
and
+ * ProductCode
.
The CustomerIdentifier is used to identify an individual customer in your - * application. Calls to BatchMeterUsage require CustomerIdentifiers for each - * UsageRecord.
+ *The CustomerIdentifier
is used to identify an individual customer in your
+ * application. Calls to BatchMeterUsage
require
+ * CustomerIdentifiers
for each UsageRecord
.
The product code is returned to confirm that the buyer is registering for your
- * product. Subsequent BatchMeterUsage calls should be made using this product
+ * product. Subsequent BatchMeterUsage
calls should be made using this product
* code.
The CustomerAWSAccountId
provides the AWS account ID associated with the
+ * CustomerIdentifier
for the individual customer.
+ * Minimum recommendation requests per second + *
* + *When you create a recommender, you can configure the recommender's minimum recommendation requests per second. The minimum recommendation requests per second
+ * (minRecommendationRequestsPerSecond
) specifies the baseline recommendation request throughput provisioned by
+ * Amazon Personalize. The default minRecommendationRequestsPerSecond is 1
. A recommendation request is a single GetRecommendations
operation.
+ * Request throughput is measured in requests per second and Amazon Personalize uses your requests per second to derive
+ * your requests per hour and the price of your recommender usage.
+ *
+ * If your requests per second increases beyond
+ * minRecommendationRequestsPerSecond
, Amazon Personalize auto-scales the provisioned capacity up and down,
+ * but never below minRecommendationRequestsPerSecond
.
+ * There's a short time delay while the capacity is increased that might cause loss of
+ * requests.
+ * Your bill is the greater of either the minimum requests per hour (based on minRecommendationRequestsPerSecond)
+ * or the actual number of requests. The actual request throughput used is calculated as the average requests/second within a one-hour window.
+ *
+ * We recommend starting with the default minRecommendationRequestsPerSecond
, track
+ * your usage using Amazon CloudWatch metrics, and then increase the minRecommendationRequestsPerSecond
+ * as necessary.
+ *
* Status diff --git a/clients/client-personalize/src/commands/CreateRecommenderCommand.ts b/clients/client-personalize/src/commands/CreateRecommenderCommand.ts index d9f92ab21b30b..2db6073f60697 100644 --- a/clients/client-personalize/src/commands/CreateRecommenderCommand.ts +++ b/clients/client-personalize/src/commands/CreateRecommenderCommand.ts @@ -28,7 +28,30 @@ export interface CreateRecommenderCommandOutput extends CreateRecommenderRespons * request. *
* + *+ * Minimum recommendation requests per second + *
+ * + *When you create a recommender, you can configure the recommender's minimum recommendation requests per second. The minimum recommendation requests per second
+ * (minRecommendationRequestsPerSecond
) specifies the baseline recommendation request throughput provisioned by
+ * Amazon Personalize. The default minRecommendationRequestsPerSecond is 1
. A recommendation request is a single GetRecommendations
operation.
+ * Request throughput is measured in requests per second and Amazon Personalize uses your requests per second to derive
+ * your requests per hour and the price of your recommender usage.
+ *
+ * If your requests per second increases beyond
+ * minRecommendationRequestsPerSecond
, Amazon Personalize auto-scales the provisioned capacity up and down,
+ * but never below minRecommendationRequestsPerSecond
.
+ * There's a short time delay while the capacity is increased that might cause loss of
+ * requests.
+ * Your bill is the greater of either the minimum requests per hour (based on minRecommendationRequestsPerSecond)
+ * or the actual number of requests. The actual request throughput used is calculated as the average requests/second within a one-hour window.
*
+ * We recommend starting with the default minRecommendationRequestsPerSecond
, track
+ * your usage using Amazon CloudWatch metrics, and then increase the minRecommendationRequestsPerSecond
+ * as necessary.
+ *
* Status diff --git a/clients/client-personalize/src/models/models_0.ts b/clients/client-personalize/src/models/models_0.ts index 8a7ce6feff4ce..ff832087ea813 100644 --- a/clients/client-personalize/src/models/models_0.ts +++ b/clients/client-personalize/src/models/models_0.ts @@ -257,7 +257,7 @@ export interface S3DataConfig { /** *
The Amazon Resource Name (ARN) of the Key Management Service (KMS) key that Amazon Personalize uses to - * encrypt or decrypt the input and output files of a batch inference job.
+ * encrypt or decrypt the input and output files. */ kmsKeyArn?: string; } @@ -971,6 +971,12 @@ export interface RecommenderConfig { * (not popular items or similar items). */ itemExplorationConfig?: { [key: string]: string }; + + /** + *Specifies the requested minimum provisioned recommendation requests per second that + * Amazon Personalize will support.
+ */ + minRecommendationRequestsPerSecond?: number; } export namespace RecommenderConfig { @@ -2958,7 +2964,7 @@ export namespace DescribeRecommenderRequest { /** *Provides a summary of the properties of a recommender update. For a complete listing, call the - * DescribeRecommender API.
+ * DescribeRecommender API operation. */ export interface RecommenderUpdateSummary { /** diff --git a/clients/client-personalize/src/protocols/Aws_json1_1.ts b/clients/client-personalize/src/protocols/Aws_json1_1.ts index 24526462cb14c..3f3b08279d953 100644 --- a/clients/client-personalize/src/protocols/Aws_json1_1.ts +++ b/clients/client-personalize/src/protocols/Aws_json1_1.ts @@ -5625,6 +5625,10 @@ const serializeAws_json1_1RecommenderConfig = (input: RecommenderConfig, context input.itemExplorationConfig !== null && { itemExplorationConfig: serializeAws_json1_1HyperParameters(input.itemExplorationConfig, context), }), + ...(input.minRecommendationRequestsPerSecond !== undefined && + input.minRecommendationRequestsPerSecond !== null && { + minRecommendationRequestsPerSecond: input.minRecommendationRequestsPerSecond, + }), }; }; @@ -7256,6 +7260,7 @@ const deserializeAws_json1_1RecommenderConfig = (output: any, context: __SerdeCo output.itemExplorationConfig !== undefined && output.itemExplorationConfig !== null ? deserializeAws_json1_1HyperParameters(output.itemExplorationConfig, context) : undefined, + minRecommendationRequestsPerSecond: __expectInt32(output.minRecommendationRequestsPerSecond), } as any; }; diff --git a/clients/client-rbin/README.md b/clients/client-rbin/README.md index adf9545bf1140..40ac5ae4fe83e 100644 --- a/clients/client-rbin/README.md +++ b/clients/client-rbin/README.md @@ -10,17 +10,17 @@ AWS SDK for JavaScript Rbin Client for Node.js, Browser and React Native.This is the Recycle Bin API Reference. This documentation provides descriptions and syntax for each of the actions and data types in Recycle Bin.
-Recycle Bin is a snapshot recovery feature that enables you to restore accidentally -deleted snapshots. When using Recycle Bin, if your snapshots are deleted, they are retained -in the Recycle Bin for a time period that you specify.
- -You can restore a snapshot from the Recycle Bin at any time before its retention period -expires. After you restore a snapshot from the Recycle Bin, the snapshot is removed from the -Recycle Bin, and you can then use it in the same way you use any other snapshot in your -account. If the retention period expires and the snapshot is not restored, the snapshot is -permanently deleted from the Recycle Bin and is no longer available for recovery. For more +
Recycle Bin is a resource recovery feature that enables you to restore accidentally +deleted snapshots and EBS-backed AMIs. When using Recycle Bin, if your resources are +deleted, they are retained in the Recycle Bin for a time period that you specify.
+ +You can restore a resource from the Recycle Bin at any time before its retention period +expires. After you restore a resource from the Recycle Bin, the resource is removed from the +Recycle Bin, and you can then use it in the same way you use any other resource of that type +in your account. If the retention period expires and the resource is not restored, the resource +is permanently deleted from the Recycle Bin and is no longer available for recovery. For more information about Recycle Bin, see -Recycle Bin in the Amazon EC2 User Guide.
+Recycle Bin in the Amazon Elastic Compute Cloud User Guide. ## Installing diff --git a/clients/client-rbin/src/Rbin.ts b/clients/client-rbin/src/Rbin.ts index e5d04c03cfef9..de610a9be6ec9 100644 --- a/clients/client-rbin/src/Rbin.ts +++ b/clients/client-rbin/src/Rbin.ts @@ -22,22 +22,22 @@ import { RbinClient } from "./RbinClient"; *This is the Recycle Bin API Reference. This documentation provides * descriptions and syntax for each of the actions and data types in Recycle Bin.
* - *Recycle Bin is a snapshot recovery feature that enables you to restore accidentally - * deleted snapshots. When using Recycle Bin, if your snapshots are deleted, they are retained - * in the Recycle Bin for a time period that you specify.
+ *Recycle Bin is a resource recovery feature that enables you to restore accidentally + * deleted snapshots and EBS-backed AMIs. When using Recycle Bin, if your resources are + * deleted, they are retained in the Recycle Bin for a time period that you specify.
* - *You can restore a snapshot from the Recycle Bin at any time before its retention period - * expires. After you restore a snapshot from the Recycle Bin, the snapshot is removed from the - * Recycle Bin, and you can then use it in the same way you use any other snapshot in your - * account. If the retention period expires and the snapshot is not restored, the snapshot is - * permanently deleted from the Recycle Bin and is no longer available for recovery. For more + *
You can restore a resource from the Recycle Bin at any time before its retention period + * expires. After you restore a resource from the Recycle Bin, the resource is removed from the + * Recycle Bin, and you can then use it in the same way you use any other resource of that type + * in your account. If the retention period expires and the resource is not restored, the resource + * is permanently deleted from the Recycle Bin and is no longer available for recovery. For more * information about Recycle Bin, see - * Recycle Bin in the Amazon EC2 User Guide.
+ * Recycle Bin in the Amazon Elastic Compute Cloud User Guide. */ export class Rbin extends RbinClient { /** *Creates a Recycle Bin retention rule. For more information, see - * Create Recycle Bin retention rules in the Amazon EC2 User Guide.
+ * Create Recycle Bin retention rules in the Amazon Elastic Compute Cloud User Guide. */ public createRule(args: CreateRuleCommandInput, options?: __HttpHandlerOptions): PromiseDeletes a Recycle Bin retention rule. For more information, see - * Delete Recycle Bin retention rules in the Amazon EC2 User Guide.
+ * Delete Recycle Bin retention rules in the Amazon Elastic Compute Cloud User Guide. */ public deleteRule(args: DeleteRuleCommandInput, options?: __HttpHandlerOptions): PromiseLists the tags assigned a specific resource.
+ *Lists the tags assigned to a retention rule.
*/ public listTagsForResource( args: ListTagsForResourceCommandInput, @@ -174,7 +174,7 @@ export class Rbin extends RbinClient { } /** - *Assigns tags to the specified resource.
+ *Assigns tags to the specified retention rule.
*/ public tagResource(args: TagResourceCommandInput, options?: __HttpHandlerOptions): PromiseUnassigns a tag from a resource.
+ *Unassigns a tag from a retention rule.
*/ public untagResource( args: UntagResourceCommandInput, @@ -233,7 +233,7 @@ export class Rbin extends RbinClient { /** *Updates an existing Recycle Bin retention rule. For more information, see - * Update Recycle Bin retention rules in the Amazon EC2 User Guide.
+ * Update Recycle Bin retention rules in the Amazon Elastic Compute Cloud User Guide. */ public updateRule(args: UpdateRuleCommandInput, options?: __HttpHandlerOptions): PromiseThis is the Recycle Bin API Reference. This documentation provides * descriptions and syntax for each of the actions and data types in Recycle Bin.
* - *Recycle Bin is a snapshot recovery feature that enables you to restore accidentally - * deleted snapshots. When using Recycle Bin, if your snapshots are deleted, they are retained - * in the Recycle Bin for a time period that you specify.
+ *Recycle Bin is a resource recovery feature that enables you to restore accidentally + * deleted snapshots and EBS-backed AMIs. When using Recycle Bin, if your resources are + * deleted, they are retained in the Recycle Bin for a time period that you specify.
* - *You can restore a snapshot from the Recycle Bin at any time before its retention period - * expires. After you restore a snapshot from the Recycle Bin, the snapshot is removed from the - * Recycle Bin, and you can then use it in the same way you use any other snapshot in your - * account. If the retention period expires and the snapshot is not restored, the snapshot is - * permanently deleted from the Recycle Bin and is no longer available for recovery. For more + *
You can restore a resource from the Recycle Bin at any time before its retention period + * expires. After you restore a resource from the Recycle Bin, the resource is removed from the + * Recycle Bin, and you can then use it in the same way you use any other resource of that type + * in your account. If the retention period expires and the resource is not restored, the resource + * is permanently deleted from the Recycle Bin and is no longer available for recovery. For more * information about Recycle Bin, see - * Recycle Bin in the Amazon EC2 User Guide.
+ * Recycle Bin in the Amazon Elastic Compute Cloud User Guide. */ export class RbinClient extends __Client< __HttpHandlerOptions, diff --git a/clients/client-rbin/src/commands/CreateRuleCommand.ts b/clients/client-rbin/src/commands/CreateRuleCommand.ts index 12d0767af4a3d..b199efab60957 100644 --- a/clients/client-rbin/src/commands/CreateRuleCommand.ts +++ b/clients/client-rbin/src/commands/CreateRuleCommand.ts @@ -23,7 +23,7 @@ export interface CreateRuleCommandOutput extends CreateRuleResponse, __MetadataB /** *Creates a Recycle Bin retention rule. For more information, see - * Create Recycle Bin retention rules in the Amazon EC2 User Guide.
+ * Create Recycle Bin retention rules in the Amazon Elastic Compute Cloud User Guide. * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-rbin/src/commands/DeleteRuleCommand.ts b/clients/client-rbin/src/commands/DeleteRuleCommand.ts index c7cf5bb3ebc68..d559eb8252963 100644 --- a/clients/client-rbin/src/commands/DeleteRuleCommand.ts +++ b/clients/client-rbin/src/commands/DeleteRuleCommand.ts @@ -23,7 +23,7 @@ export interface DeleteRuleCommandOutput extends DeleteRuleResponse, __MetadataB /** *Deletes a Recycle Bin retention rule. For more information, see - * Delete Recycle Bin retention rules in the Amazon EC2 User Guide.
+ * Delete Recycle Bin retention rules in the Amazon Elastic Compute Cloud User Guide. * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-rbin/src/commands/ListTagsForResourceCommand.ts b/clients/client-rbin/src/commands/ListTagsForResourceCommand.ts index 5dbe71fa4f752..c3f2584c6a202 100644 --- a/clients/client-rbin/src/commands/ListTagsForResourceCommand.ts +++ b/clients/client-rbin/src/commands/ListTagsForResourceCommand.ts @@ -22,7 +22,7 @@ export interface ListTagsForResourceCommandInput extends ListTagsForResourceRequ export interface ListTagsForResourceCommandOutput extends ListTagsForResourceResponse, __MetadataBearer {} /** - *Lists the tags assigned a specific resource.
+ *Lists the tags assigned to a retention rule.
* @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-rbin/src/commands/TagResourceCommand.ts b/clients/client-rbin/src/commands/TagResourceCommand.ts index 689ab4f9a5613..108b2bb59b014 100644 --- a/clients/client-rbin/src/commands/TagResourceCommand.ts +++ b/clients/client-rbin/src/commands/TagResourceCommand.ts @@ -22,7 +22,7 @@ export interface TagResourceCommandInput extends TagResourceRequest {} export interface TagResourceCommandOutput extends TagResourceResponse, __MetadataBearer {} /** - *Assigns tags to the specified resource.
+ *Assigns tags to the specified retention rule.
* @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-rbin/src/commands/UntagResourceCommand.ts b/clients/client-rbin/src/commands/UntagResourceCommand.ts index 017ba5d5dfb9c..9d7755ea78cec 100644 --- a/clients/client-rbin/src/commands/UntagResourceCommand.ts +++ b/clients/client-rbin/src/commands/UntagResourceCommand.ts @@ -22,7 +22,7 @@ export interface UntagResourceCommandInput extends UntagResourceRequest {} export interface UntagResourceCommandOutput extends UntagResourceResponse, __MetadataBearer {} /** - *Unassigns a tag from a resource.
+ *Unassigns a tag from a retention rule.
* @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-rbin/src/commands/UpdateRuleCommand.ts b/clients/client-rbin/src/commands/UpdateRuleCommand.ts index 01dac4877c1e3..b395de8e2b8e0 100644 --- a/clients/client-rbin/src/commands/UpdateRuleCommand.ts +++ b/clients/client-rbin/src/commands/UpdateRuleCommand.ts @@ -23,7 +23,7 @@ export interface UpdateRuleCommandOutput extends UpdateRuleResponse, __MetadataB /** *Updates an existing Recycle Bin retention rule. For more information, see - * Update Recycle Bin retention rules in the Amazon EC2 User Guide.
+ * Update Recycle Bin retention rules in the Amazon Elastic Compute Cloud User Guide. * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-rbin/src/models/models_0.ts b/clients/client-rbin/src/models/models_0.ts index e00b6a676f00e..552acf4981758 100644 --- a/clients/client-rbin/src/models/models_0.ts +++ b/clients/client-rbin/src/models/models_0.ts @@ -1,7 +1,8 @@ import { MetadataBearer as $MetadataBearer, SmithyException as __SmithyException } from "@aws-sdk/types"; /** - *Information about a resource tag used to identify resources that are to be retained by a Recycle Bin retention rule.
+ *Information about the resource tags used to identify resources that are retained by the retention + * rule.
*/ export interface ResourceTag { /** @@ -26,6 +27,7 @@ export namespace ResourceTag { export enum ResourceType { EBS_SNAPSHOT = "EBS_SNAPSHOT", + EC2_IMAGE = "EC2_IMAGE", } export enum RetentionPeriodUnit { @@ -33,7 +35,7 @@ export enum RetentionPeriodUnit { } /** - *Information about the retention period for which a retention rule is to retain resources.
+ *Information about the retention period for which the retention rule is to retain resources.
*/ export interface RetentionPeriod { /** @@ -59,7 +61,7 @@ export namespace RetentionPeriod { } /** - *Information about the tags assigned to a Recycle Bin retention rule.
+ *Information about the tags to assign to the retention rule.
*/ export interface Tag { /** @@ -89,7 +91,7 @@ export interface CreateRuleRequest { RetentionPeriod: RetentionPeriod | undefined; /** - *A brief description for the retention rule.
+ *The retention rule description.
*/ Description?: string; @@ -99,18 +101,21 @@ export interface CreateRuleRequest { Tags?: Tag[]; /** - *The resource type to be retained by the retention rule. Currently, only Amazon EBS snapshots are - * supported.
+ *The resource type to be retained by the retention rule. Currently, only Amazon EBS snapshots
+ * and EBS-backed AMIs are supported. To retain snapshots, specify EBS_SNAPSHOT
. To
+ * retain EBS-backed AMIs, specify EC2_IMAGE
.
Information about the resource tags to use to identify resources that are to be retained - * by the retention rule. The retention rule retains only deleted snapshots that have one or more - * of the specified tag key and value pairs. If a snapshot is deleted, but it does not have - * any of the specified tag key and value pairs, it is immediately deleted without being retained - * by the retention rule.
+ *Specifies the resource tags to use to identify resources that are to be retained by a + * tag-level retention rule. For tag-level retention rules, only deleted resources, of the specified resource type, that + * have one or more of the specified tag key and value pairs are retained. If a resource is deleted, but it does not have + * any of the specified tag key and value pairs, it is immediately deleted without being retained by the retention rule.
*You can add the same tag key and value pair to a maximum or five retention rules.
+ *To create a Region-level retention rule, omit this parameter. A Region-level retention rule + * does not have any resource tags specified. It retains all deleted resources of the specified + * resource type in the Region in which the rule is created, even if the resources are not tagged.
*/ ResourceTags?: ResourceTag[]; } @@ -131,12 +136,12 @@ export enum RuleStatus { export interface CreateRuleResponse { /** - *The unique identifier of the retention rule.
+ *The unique ID of the retention rule.
*/ Identifier?: string; /** - *Information about the retention period for which a retention rule is to retain resources.
+ *Information about the retention period for which the retention rule is to retain resources.
*/ RetentionPeriod?: RetentionPeriod; @@ -146,7 +151,7 @@ export interface CreateRuleResponse { Description?: string; /** - *The tags assigned to the retention rule.
+ *Information about the tags assigned to the retention rule.
*/ Tags?: Tag[]; @@ -162,7 +167,8 @@ export interface CreateRuleResponse { ResourceTags?: ResourceTag[]; /** - *The state of the retention rule. Only retention rules that are in the available
state retain snapshots.
The state of the retention rule. Only retention rules that are in the available
+ * state retain resources.
The unique ID of the retention rule to delete.
+ *The unique ID of the retention rule.
*/ Identifier: string | undefined; } @@ -287,27 +293,29 @@ export interface GetRuleResponse { Identifier?: string; /** - *The description assigned to the retention rule.
+ *The retention rule description.
*/ Description?: string; /** - *The resource type retained by the retention rule. Currently, only Amazon EBS snapshots are supported.
+ *The resource type retained by the retention rule.
*/ ResourceType?: ResourceType | string; /** - *Information about the period for which the retention rule retains resources.
+ *Information about the retention period for which the retention rule is to retain resources.
*/ RetentionPeriod?: RetentionPeriod; /** - *The resource tags used to identify resources that are to be retained by the retention rule.
+ *Information about the resource tags used to identify resources that are retained by the retention + * rule.
*/ ResourceTags?: ResourceTag[]; /** - *The state of the retention rule. Only retention rules that are in the available
state retain snapshots.
The state of the retention rule. Only retention rules that are in the available
+ * state retain resources.
The maximum number of results to return for the request in a single page. The remaining results can be seen by sending another request with the returned nextToken
value. This value can be between 5 and 500. If maxResults
is given a larger value than 500, you receive an error.
The maximum number of results to return with a single call.
+ * To retrieve the remaining results, make another call with the returned NextToken
value.
The token to use to retrieve the next page of results.
+ *The token for the next page of results.
*/ NextToken?: string; /** - *The resource type retained by the retention rule. Only retention rules that retain the specified resource type - * are listed.
+ *The resource type retained by the retention rule. Only retention rules that retain
+ * the specified resource type are listed. Currently, only Amazon EBS snapshots and EBS-backed
+ * AMIs are supported. To list retention rules that retain snapshots, specify
+ * EBS_SNAPSHOT
. To list retention rules that retain EBS-backed AMIs, specify
+ * EC2_IMAGE
.
The tags used to identify resources that are to be retained by the retention rule.
+ *Information about the resource tags used to identify resources that are retained by the retention + * rule.
*/ ResourceTags?: ResourceTag[]; } @@ -363,12 +376,12 @@ export interface RuleSummary { Identifier?: string; /** - *The description for the retention rule.
+ *The retention rule description.
*/ Description?: string; /** - *Information about the retention period for which the retention rule retains resources
+ *Information about the retention period for which the retention rule is to retain resources.
*/ RetentionPeriod?: RetentionPeriod; } @@ -405,7 +418,7 @@ export namespace ListRulesResponse { export interface ListTagsForResourceRequest { /** - *The Amazon Resource Name (ARN) of the resource for which to list the tags.
+ *The Amazon Resource Name (ARN) of the retention rule.
*/ ResourceArn: string | undefined; } @@ -421,7 +434,7 @@ export namespace ListTagsForResourceRequest { export interface ListTagsForResourceResponse { /** - *Information about the tags assigned to the resource.
+ *Information about the tags assigned to the retention rule.
*/ Tags?: Tag[]; } @@ -437,12 +450,12 @@ export namespace ListTagsForResourceResponse { export interface TagResourceRequest { /** - *The Amazon Resource Name (ARN) of the resource to which to assign the tags.
+ *The Amazon Resource Name (ARN) of the retention rule.
*/ ResourceArn: string | undefined; /** - *Information about the tags to assign to the resource.
+ *Information about the tags to assign to the retention rule.
*/ Tags: Tag[] | undefined; } @@ -469,12 +482,12 @@ export namespace TagResourceResponse { export interface UntagResourceRequest { /** - *The Amazon Resource Name (ARN) of the resource from which to unassign the tags.
+ *The Amazon Resource Name (ARN) of the retention rule.
*/ ResourceArn: string | undefined; /** - *Information about the tags to unassign from the resource.
+ *The tag keys of the tags to unassign. All tags that have the specified tag key are unassigned.
*/ TagKeys: string[] | undefined; } @@ -501,7 +514,7 @@ export namespace UntagResourceResponse { export interface UpdateRuleRequest { /** - *The unique ID of the retention rule to update.
+ *The unique ID of the retention rule.
*/ Identifier: string | undefined; @@ -516,17 +529,21 @@ export interface UpdateRuleRequest { Description?: string; /** - *The resource type to be retained by the retention rule. Currently, only Amazon EBS snapshots are supported.
+ *The resource type to be retained by the retention rule. Currently, only Amazon EBS snapshots
+ * and EBS-backed AMIs are supported. To retain snapshots, specify EBS_SNAPSHOT
. To
+ * retain EBS-backed AMIs, specify EC2_IMAGE
.
Information about the resource tags to use to identify resources that are to be retained - * by the retention rule. The retention rule retains only deleted snapshots that have one or more - * of the specified tag key and value pairs. If a snapshot is deleted, but it does not have - * any of the specified tag key and value pairs, it is immediately deleted without being retained - * by the retention rule.
+ *Specifies the resource tags to use to identify resources that are to be retained by a + * tag-level retention rule. For tag-level retention rules, only deleted resources, of the specified resource type, that + * have one or more of the specified tag key and value pairs are retained. If a resource is deleted, but it does not have + * any of the specified tag key and value pairs, it is immediately deleted without being retained by the retention rule.
*You can add the same tag key and value pair to a maximum or five retention rules.
+ *To create a Region-level retention rule, omit this parameter. A Region-level retention rule + * does not have any resource tags specified. It retains all deleted resources of the specified + * resource type in the Region in which the rule is created, even if the resources are not tagged.
*/ ResourceTags?: ResourceTag[]; } @@ -547,7 +564,7 @@ export interface UpdateRuleResponse { Identifier?: string; /** - *Information about the retention period for which a retention rule is to retain resources.
+ *Information about the retention period for which the retention rule is to retain resources.
*/ RetentionPeriod?: RetentionPeriod; @@ -568,7 +585,8 @@ export interface UpdateRuleResponse { ResourceTags?: ResourceTag[]; /** - *The state of the retention rule. Only retention rules that are in the available
state retain snapshots.
The state of the retention rule. Only retention rules that are in the available
+ * state retain resources.
Creates a custom DB engine version (CEV). A CEV is a binary volume snapshot of a database engine and specific - * AMI. The only supported engine is Oracle Database 19c Enterprise Edition with the January 2021 or later - * RU/RUR.
+ * AMI. The supported engines are the following: + *Oracle Database 12.1 Enterprise Edition with the January 2021 or later RU/RUR
+ *Oracle Database 19c Enterprise Edition with the January 2021 or later RU/RUR
+ *Amazon RDS, which is a fully managed service, supplies the Amazon Machine Image (AMI) and database software. * The Amazon RDS database software is preinstalled, so you need only select a DB engine and version, and create * your database. With Amazon RDS Custom for Oracle, you upload your database installation files in Amazon S3.
diff --git a/clients/client-rds/src/commands/CreateCustomDBEngineVersionCommand.ts b/clients/client-rds/src/commands/CreateCustomDBEngineVersionCommand.ts index b96e939659959..6c0b5cbe7c92a 100644 --- a/clients/client-rds/src/commands/CreateCustomDBEngineVersionCommand.ts +++ b/clients/client-rds/src/commands/CreateCustomDBEngineVersionCommand.ts @@ -23,8 +23,15 @@ export interface CreateCustomDBEngineVersionCommandOutput extends DBEngineVersio /** *Creates a custom DB engine version (CEV). A CEV is a binary volume snapshot of a database engine and specific - * AMI. The only supported engine is Oracle Database 19c Enterprise Edition with the January 2021 or later - * RU/RUR.
+ * AMI. The supported engines are the following: + *Oracle Database 12.1 Enterprise Edition with the January 2021 or later RU/RUR
+ *Oracle Database 19c Enterprise Edition with the January 2021 or later RU/RUR
+ *Amazon RDS, which is a fully managed service, supplies the Amazon Machine Image (AMI) and database software. * The Amazon RDS database software is preinstalled, so you need only select a DB engine and version, and create * your database. With Amazon RDS Custom for Oracle, you upload your database installation files in Amazon S3.
diff --git a/clients/client-rds/src/models/models_0.ts b/clients/client-rds/src/models/models_0.ts index ca937b0df5f8c..9be54ebdf76ce 100644 --- a/clients/client-rds/src/models/models_0.ts +++ b/clients/client-rds/src/models/models_0.ts @@ -3429,7 +3429,7 @@ export interface CreateDBClusterMessage { *A DB subnet group to associate with this DB cluster.
*This setting is required to create a Multi-AZ DB cluster.
*Constraints: Must match the name of an existing DBSubnetGroup. Must not be default.
- *Example: mySubnetgroup
+ *
Example: mydbsubnetgroup
*
Valid for: Aurora DB clusters and Multi-AZ DB clusters
*/ @@ -5764,7 +5764,9 @@ export interface CreateDBInstanceMessage { /** *A DB subnet group to associate with this DB instance.
- *If there is no DB subnet group, then it is a non-VPC DB instance.
+ *Constraints: Must match the name of an existing DBSubnetGroup. Must not be default.
+ *Example: mydbsubnetgroup
+ *
Example: mySubnetgroup
+ *
Example: mydbsubnetgroup
*
The name for the DB subnet group. This value is stored as a lowercase string.
- *Constraints: Must contain no more than 255 letters, numbers, periods, underscores, spaces, or hyphens. Must not be default.
- *Example: mySubnetgroup
+ *
Constraints:
+ *Must contain no more than 255 letters, numbers, periods, underscores, spaces, or hyphens.
+ *Must not be default.
+ *First character must be a letter.
+ *Example: mydbsubnetgroup
*
SNS has responded that there is a problem with the SND topic specified.
+ *SNS has responded that there is a problem with the SNS topic specified.
*/ export interface SNSInvalidTopicFault extends __SmithyException, $MetadataBearer { name: "SNSInvalidTopicFault"; @@ -10333,9 +10346,8 @@ export interface DeleteDBSubnetGroupMessage { *You can't delete the default subnet group.
*Constraints:
*Constraints: Must match the name of an existing DBSubnetGroup. Must not be default.
- *Example: mySubnetgroup
+ *
Example: mydbsubnetgroup
*
ApplyImmediately
.
* This parameter doesn't apply to RDS Custom.
*Constraints: If supplied, must match the name of an existing DBSubnetGroup.
- *Example: mySubnetGroup
+ *
Example: mydbsubnetgroup
*
Constraints: Must match the name of an existing DBSubnetGroup. Must not be default.
- *Example: mySubnetgroup
+ *
Example: mydbsubnetgroup
*
A DB subnet group to associate with the restored DB cluster.
- *Constraints: If supplied, must match the name of an existing DBSubnetGroup. - *
- *Example: mySubnetgroup
- *
Constraints: If supplied, must match the name of an existing DBSubnetGroup.
+ *Example: mydbsubnetgroup
+ *
The name of the DB subnet group to use for the new DB cluster.
*Constraints: If supplied, must match the name of an existing DB subnet group.
- *Example: mySubnetgroup
+ *
Example: mydbsubnetgroup
*
Valid for: Aurora DB clusters and Multi-AZ DB clusters
*/ @@ -7477,7 +7476,7 @@ export interface RestoreDBClusterToPointInTimeMessage { /** *The DB subnet group name to use for the new DB cluster.
*Constraints: If supplied, must match the name of an existing DBSubnetGroup.
- *Example: mySubnetgroup
+ *
Example: mydbsubnetgroup
*
Valid for: Aurora DB clusters and Multi-AZ DB clusters
*/ @@ -7812,7 +7811,7 @@ export interface RestoreDBInstanceFromDBSnapshotMessage { /** *The DB subnet group name to use for the new instance.
*Constraints: If supplied, must match the name of an existing DBSubnetGroup.
- *Example: mySubnetgroup
+ *
Example: mydbsubnetgroup
*
A value that indicates whether to copy all tags from the restored DB instance to snapshots of the DB instance. By default, tags are not copied.
+ *A value that indicates whether to copy all tags from the restored DB instance to snapshots of the DB instance.
+ *In most cases, tags aren't copied by default. However, when you restore a DB instance from a DB snapshot, RDS checks whether you + * specify new tags. If yes, the new tags are added to the restored DB instance. If there are no new tags, RDS looks for the tags from + * the source DB instance for the DB snapshot, and then adds those tags to the restored DB instance.
+ *For more information, see + * Copying tags to DB instance snapshots in the Amazon RDS User Guide.
*/ CopyTagsToSnapshot?: boolean; @@ -8113,8 +8117,7 @@ export interface RestoreDBInstanceFromDBSnapshotMessage { * *For the list of permissions required for the IAM role, see * - * Configure IAM and your VPC in the Amazon Relational Database Service - * User Guide.
+ * Configure IAM and your VPC in the Amazon RDS User Guide. *This setting is required for RDS Custom.
*/ CustomIamInstanceProfile?: string; @@ -8285,6 +8288,9 @@ export interface RestoreDBInstanceFromS3Message { /** *A DB subnet group to associate with this DB instance.
+ *Constraints: If supplied, must match the name of an existing DBSubnetGroup.
+ *Example: mydbsubnetgroup
+ *
The DB subnet group name to use for the new instance.
*Constraints: If supplied, must match the name of an existing DBSubnetGroup.
- *Example: mySubnetgroup
+ *
Example: mydbsubnetgroup
*
Cancels the specified deployment job.
+ *This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service.
+ *Deploys a specific version of a robot application to robots in a fleet.
+ *This API is no longer supported and will throw an error if used.
+ *The robot application must have a numbered applicationVersion
for
* consistency reasons. To create a new version, use
* CreateRobotApplicationVersion
or see Creating a Robot Application Version.
Creates a fleet, a logical group of robots running the same robot application.
+ *This API is no longer supported and will throw an error if used.
+ *Creates a robot.
+ *This API is no longer supported and will throw an error if used.
+ *Deletes a fleet.
+ *This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service.
+ *Deletes a robot.
+ *This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service.
+ *Deregisters a robot.
+ *This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service.
+ *Describes a deployment job.
+ *This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service.
+ *Describes a fleet.
+ *This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service.
+ *Describes a robot.
+ *This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service.
+ *Returns a list of deployment jobs for a fleet. You can optionally provide filters to - * retrieve specific deployment jobs.
+ * @deprecated + * + *Returns a list of deployment jobs for a fleet. You can optionally provide filters to retrieve specific deployment jobs.
+ *This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service.
+ *Returns a list of fleets. You can optionally provide filters to retrieve specific - * fleets.
+ * @deprecated + * + *Returns a list of fleets. You can optionally provide filters to retrieve specific fleets.
+ *This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service.
+ *Returns a list of robots. You can optionally provide filters to retrieve specific - * robots.
+ * @deprecated + * + *Returns a list of robots. You can optionally provide filters to retrieve specific robots.
+ *This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service.
+ *Registers a robot with a fleet.
+ *This API is no longer supported and will throw an error if used.
+ *Syncrhonizes robots in a fleet to the latest deployment. This is helpful if robots were - * added after a deployment.
+ * @deprecated + * + *Syncrhonizes robots in a fleet to the latest deployment. This is helpful if robots were added after a deployment.
+ *This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service.
+ *Cancels the specified deployment job.
+ *This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service.
+ *Deploys a specific version of a robot application to robots in a fleet.
+ *This API is no longer supported and will throw an error if used.
+ *The robot application must have a numbered applicationVersion
for
* consistency reasons. To create a new version, use
* CreateRobotApplicationVersion
or see Creating a Robot Application Version.
Creates a fleet, a logical group of robots running the same robot application.
+ *This API is no longer supported and will throw an error if used.
+ *Creates a robot.
+ *This API is no longer supported and will throw an error if used.
+ *Deletes a fleet.
+ *This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service.
+ *Deletes a robot.
+ *This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service.
+ *Deregisters a robot.
+ *This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service.
+ *Describes a deployment job.
+ *This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service.
+ *Describes a fleet.
+ *This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service.
+ *Describes a robot.
+ *This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service.
+ *Returns a list of deployment jobs for a fleet. You can optionally provide filters to - * retrieve specific deployment jobs.
+ * @deprecated + * + *Returns a list of deployment jobs for a fleet. You can optionally provide filters to retrieve specific deployment jobs.
+ *This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service.
+ *Returns a list of fleets. You can optionally provide filters to retrieve specific - * fleets.
+ * @deprecated + * + *Returns a list of fleets. You can optionally provide filters to retrieve specific fleets.
+ *This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service.
+ *Returns a list of robots. You can optionally provide filters to retrieve specific - * robots.
+ * @deprecated + * + *Returns a list of robots. You can optionally provide filters to retrieve specific robots.
+ *This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service.
+ *Registers a robot with a fleet.
+ *This API is no longer supported and will throw an error if used.
+ *Syncrhonizes robots in a fleet to the latest deployment. This is helpful if robots were - * added after a deployment.
+ * @deprecated + * + *Syncrhonizes robots in a fleet to the latest deployment. This is helpful if robots were added after a deployment.
+ *This API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service.
+ *AppFlow/Requester has invalid or missing permissions.
", + "smithy.api#error": "client", + "smithy.api#httpError": 403 + } + }, "com.amazonaws.appflow#AccessKeyId": { "type": "string", "traits": { @@ -55,7 +68,7 @@ "traits": { "smithy.api#length": { "min": 0, - "max": 512 + "max": 2048 }, "smithy.api#pattern": "^\\S+$", "smithy.api#sensitive": {} @@ -169,7 +182,29 @@ "min": 0, "max": 256 }, - "smithy.api#pattern": "^\\S+$" + "smithy.api#pattern": "^\\S+$", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.appflow#ApiKeyCredentials": { + "type": "structure", + "members": { + "apiKey": { + "target": "com.amazonaws.appflow#ApiKey", + "traits": { + "smithy.api#documentation": "The API key required for API key authentication.
", + "smithy.api#required": {} + } + }, + "apiSecretKey": { + "target": "com.amazonaws.appflow#ApiSecretKey", + "traits": { + "smithy.api#documentation": "The API secret key required for API key authentication.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The API key credentials required for API key authentication.
" } }, "com.amazonaws.appflow#ApiSecretKey": { @@ -193,6 +228,16 @@ "smithy.api#pattern": "^\\S+$" } }, + "com.amazonaws.appflow#ApiVersion": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 256 + }, + "smithy.api#pattern": "^\\S+$" + } + }, "com.amazonaws.appflow#ApplicationHostUrl": { "type": "string", "traits": { @@ -223,6 +268,16 @@ "smithy.api#pattern": "^\\S+$" } }, + "com.amazonaws.appflow#ApplicationType": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 512 + }, + "smithy.api#pattern": "^\\S+$" + } + }, "com.amazonaws.appflow#AuthCode": { "type": "string", "traits": { @@ -243,6 +298,129 @@ "smithy.api#pattern": "^(https?)://[-a-zA-Z0-9+&@#/%?=~_|!:,.;]*[-a-zA-Z0-9+&@#/%=~_|]$" } }, + "com.amazonaws.appflow#AuthCodeUrlList": { + "type": "list", + "member": { + "target": "com.amazonaws.appflow#AuthCodeUrl" + } + }, + "com.amazonaws.appflow#AuthParameter": { + "type": "structure", + "members": { + "key": { + "target": "com.amazonaws.appflow#Key", + "traits": { + "smithy.api#documentation": "The authentication key required to authenticate with the connector.
" + } + }, + "isRequired": { + "target": "com.amazonaws.appflow#Boolean", + "traits": { + "smithy.api#documentation": "Indicates whether this authentication parameter is required.
" + } + }, + "label": { + "target": "com.amazonaws.appflow#Label", + "traits": { + "smithy.api#documentation": "Label used for authentication parameter.
" + } + }, + "description": { + "target": "com.amazonaws.appflow#Description", + "traits": { + "smithy.api#documentation": "A description about the authentication parameter.
" + } + }, + "isSensitiveField": { + "target": "com.amazonaws.appflow#Boolean", + "traits": { + "smithy.api#documentation": "Indicates whether this authentication parameter is a sensitive field.
" + } + }, + "connectorSuppliedValues": { + "target": "com.amazonaws.appflow#ConnectorSuppliedValueList", + "traits": { + "smithy.api#documentation": "Contains default values for this authentication parameter that are supplied by the\n connector.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Information about required authentication parameters.
" + } + }, + "com.amazonaws.appflow#AuthParameterList": { + "type": "list", + "member": { + "target": "com.amazonaws.appflow#AuthParameter" + } + }, + "com.amazonaws.appflow#AuthenticationConfig": { + "type": "structure", + "members": { + "isBasicAuthSupported": { + "target": "com.amazonaws.appflow#Boolean", + "traits": { + "smithy.api#documentation": "Indicates whether basic authentication is supported by the connector.
" + } + }, + "isApiKeyAuthSupported": { + "target": "com.amazonaws.appflow#Boolean", + "traits": { + "smithy.api#documentation": "Indicates whether API key authentication is supported by the connector
" + } + }, + "isOAuth2Supported": { + "target": "com.amazonaws.appflow#Boolean", + "traits": { + "smithy.api#documentation": "Indicates whether OAuth 2.0 authentication is supported by the connector.
" + } + }, + "isCustomAuthSupported": { + "target": "com.amazonaws.appflow#Boolean", + "traits": { + "smithy.api#documentation": "Indicates whether custom authentication is supported by the connector
" + } + }, + "oAuth2Defaults": { + "target": "com.amazonaws.appflow#OAuth2Defaults", + "traits": { + "smithy.api#documentation": "Contains the default values required for OAuth 2.0 authentication.
" + } + }, + "customAuthConfigs": { + "target": "com.amazonaws.appflow#CustomAuthConfigList", + "traits": { + "smithy.api#documentation": "Contains information required for custom authentication.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Contains information about the authentication config that the connector supports.
" + } + }, + "com.amazonaws.appflow#AuthenticationType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "OAUTH2", + "name": "OAUTH2" + }, + { + "value": "APIKEY", + "name": "APIKEY" + }, + { + "value": "BASIC", + "name": "BASIC" + }, + { + "value": "CUSTOM", + "name": "CUSTOM" + } + ] + } + }, "com.amazonaws.appflow#BasicAuthCredentials": { "type": "structure", "members": { @@ -421,6 +599,114 @@ "traits": { "smithy.api#documentation": " Specifies connector-specific metadata such as oAuthScopes
,\n supportedRegions
, privateLinkServiceUrl
, and so on.
The connector type.
" + } + }, + "connectorLabel": { + "target": "com.amazonaws.appflow#ConnectorLabel", + "traits": { + "smithy.api#documentation": "The label used for registering the connector.
" + } + }, + "connectorDescription": { + "target": "com.amazonaws.appflow#ConnectorDescription", + "traits": { + "smithy.api#documentation": "A description about the connector.
" + } + }, + "connectorOwner": { + "target": "com.amazonaws.appflow#ConnectorOwner", + "traits": { + "smithy.api#documentation": "The owner who developed the connector.
" + } + }, + "connectorName": { + "target": "com.amazonaws.appflow#ConnectorName", + "traits": { + "smithy.api#documentation": "The connector name.
" + } + }, + "connectorVersion": { + "target": "com.amazonaws.appflow#ConnectorVersion", + "traits": { + "smithy.api#documentation": "The connector version.
" + } + }, + "connectorArn": { + "target": "com.amazonaws.appflow#ARN", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) for the registered connector.
" + } + }, + "connectorModes": { + "target": "com.amazonaws.appflow#ConnectorModeList", + "traits": { + "smithy.api#documentation": "The connection modes that the connector supports.
" + } + }, + "authenticationConfig": { + "target": "com.amazonaws.appflow#AuthenticationConfig", + "traits": { + "smithy.api#documentation": "The authentication config required for the connector.
" + } + }, + "connectorRuntimeSettings": { + "target": "com.amazonaws.appflow#ConnectorRuntimeSettingList", + "traits": { + "smithy.api#documentation": "The required connector runtime settings.
" + } + }, + "supportedApiVersions": { + "target": "com.amazonaws.appflow#SupportedApiVersionList", + "traits": { + "smithy.api#documentation": "A list of API versions that are supported by the connector.
" + } + }, + "supportedOperators": { + "target": "com.amazonaws.appflow#SupportedOperatorList", + "traits": { + "smithy.api#documentation": "A list of operators supported by the connector.
" + } + }, + "supportedWriteOperations": { + "target": "com.amazonaws.appflow#SupportedWriteOperationList", + "traits": { + "smithy.api#documentation": "A list of write operations supported by the connector.
" + } + }, + "connectorProvisioningType": { + "target": "com.amazonaws.appflow#ConnectorProvisioningType", + "traits": { + "smithy.api#documentation": "The provisioning type used to register the connector.
" + } + }, + "connectorProvisioningConfig": { + "target": "com.amazonaws.appflow#ConnectorProvisioningConfig", + "traits": { + "smithy.api#documentation": "The configuration required for registering the connector.
" + } + }, + "logoURL": { + "target": "com.amazonaws.appflow#LogoURL", + "traits": { + "smithy.api#documentation": "Logo URL of the connector.
" + } + }, + "registeredAt": { + "target": "com.amazonaws.appflow#Date", + "traits": { + "smithy.api#documentation": "The date on which the connector was registered.
" + } + }, + "registeredBy": { + "target": "com.amazonaws.appflow#RegisteredBy", + "traits": { + "smithy.api#documentation": "Information about who registered the connector.
" + } } }, "traits": { @@ -436,6 +722,90 @@ "target": "com.amazonaws.appflow#ConnectorConfiguration" } }, + "com.amazonaws.appflow#ConnectorDescription": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 2048 + }, + "smithy.api#pattern": "^[\\w!@#\\-.?,\\s]*$" + } + }, + "com.amazonaws.appflow#ConnectorDetail": { + "type": "structure", + "members": { + "connectorDescription": { + "target": "com.amazonaws.appflow#ConnectorDescription", + "traits": { + "smithy.api#documentation": "A description about the registered connector.
" + } + }, + "connectorName": { + "target": "com.amazonaws.appflow#ConnectorName", + "traits": { + "smithy.api#documentation": "The name of the connector.
" + } + }, + "connectorOwner": { + "target": "com.amazonaws.appflow#ConnectorOwner", + "traits": { + "smithy.api#documentation": "The owner of the connector.
" + } + }, + "connectorVersion": { + "target": "com.amazonaws.appflow#ConnectorVersion", + "traits": { + "smithy.api#documentation": "The connector version.
" + } + }, + "applicationType": { + "target": "com.amazonaws.appflow#ApplicationType", + "traits": { + "smithy.api#documentation": "The application type of the connector.
" + } + }, + "connectorType": { + "target": "com.amazonaws.appflow#ConnectorType", + "traits": { + "smithy.api#documentation": "The connector type.
" + } + }, + "connectorLabel": { + "target": "com.amazonaws.appflow#ConnectorLabel", + "traits": { + "smithy.api#documentation": "A label used for the connector.
" + } + }, + "registeredAt": { + "target": "com.amazonaws.appflow#Date", + "traits": { + "smithy.api#documentation": "The time at which the connector was registered.
" + } + }, + "registeredBy": { + "target": "com.amazonaws.appflow#RegisteredBy", + "traits": { + "smithy.api#documentation": "The user who registered the connector.
" + } + }, + "connectorProvisioningType": { + "target": "com.amazonaws.appflow#ConnectorProvisioningType", + "traits": { + "smithy.api#documentation": "The provisioning type that the connector uses.
" + } + }, + "connectorModes": { + "target": "com.amazonaws.appflow#ConnectorModeList", + "traits": { + "smithy.api#documentation": "The connection mode that the connector supports.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Information about the registered connector.
" + } + }, "com.amazonaws.appflow#ConnectorEntity": { "type": "structure", "members": { @@ -473,26 +843,50 @@ "smithy.api#required": {} } }, + "parentIdentifier": { + "target": "com.amazonaws.appflow#Identifier", + "traits": { + "smithy.api#documentation": "The parent identifier of the connector field.
" + } + }, "label": { "target": "com.amazonaws.appflow#Label", "traits": { "smithy.api#documentation": "The label applied to a connector entity field.
" } }, - "supportedFieldTypeDetails": { - "target": "com.amazonaws.appflow#SupportedFieldTypeDetails", + "isPrimaryKey": { + "target": "com.amazonaws.appflow#Boolean", "traits": { - "smithy.api#documentation": " Contains details regarding the supported FieldType
, including the\n corresponding filterOperators
and supportedValues
.
Boolean value that indicates whether this field can be used as a primary key.
" } }, - "description": { - "target": "com.amazonaws.appflow#Description", + "defaultValue": { + "target": "com.amazonaws.appflow#String", "traits": { - "smithy.api#documentation": "A description of the connector entity field.
" + "smithy.api#documentation": "Default value that can be assigned to this field.
" } }, - "sourceProperties": { - "target": "com.amazonaws.appflow#SourceFieldProperties", + "isDeprecated": { + "target": "com.amazonaws.appflow#Boolean", + "traits": { + "smithy.api#documentation": "Booelan value that indicates whether this field is deprecated or not.
" + } + }, + "supportedFieldTypeDetails": { + "target": "com.amazonaws.appflow#SupportedFieldTypeDetails", + "traits": { + "smithy.api#documentation": " Contains details regarding the supported FieldType
, including the\n corresponding filterOperators
and supportedValues
.
A description of the connector entity field.
" + } + }, + "sourceProperties": { + "target": "com.amazonaws.appflow#SourceFieldProperties", "traits": { "smithy.api#documentation": "The properties that can be applied to a field when the connector is being used as a\n source.
" } @@ -502,6 +896,12 @@ "traits": { "smithy.api#documentation": "The properties applied to a field when the connector is being used as a destination.\n
" } + }, + "customProperties": { + "target": "com.amazonaws.appflow#CustomProperties", + "traits": { + "smithy.api#documentation": "A map that has specific properties related to the ConnectorEntityField.
" + } } }, "traits": { @@ -529,6 +929,22 @@ "target": "com.amazonaws.appflow#ConnectorEntityList" } }, + "com.amazonaws.appflow#ConnectorLabel": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 256 + }, + "smithy.api#pattern": "^[a-zA-Z0-9][\\w!@#.-]+$" + } + }, + "com.amazonaws.appflow#ConnectorList": { + "type": "list", + "member": { + "target": "com.amazonaws.appflow#ConnectorDetail" + } + }, "com.amazonaws.appflow#ConnectorMetadata": { "type": "structure", "members": { @@ -660,6 +1076,32 @@ "smithy.api#documentation": " A structure to specify connector-specific metadata such as oAuthScopes
,\n supportedRegions
, privateLinkServiceUrl
, and so on.
The operation to be performed on the provided SAPOData source fields.
" } + }, + "CustomConnector": { + "target": "com.amazonaws.appflow#Operator", + "traits": { + "smithy.api#documentation": "Operators supported by the custom connector.
" + } } }, "traits": { "smithy.api#documentation": "The operation to be performed on the provided source fields.
" } }, + "com.amazonaws.appflow#ConnectorOwner": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 256 + }, + "smithy.api#pattern": ".*" + } + }, "com.amazonaws.appflow#ConnectorProfile": { "type": "structure", "members": { @@ -799,6 +1257,12 @@ "smithy.api#documentation": "The type of connector, such as Salesforce, Amplitude, and so on.
" } }, + "connectorLabel": { + "target": "com.amazonaws.appflow#ConnectorLabel", + "traits": { + "smithy.api#documentation": "The label for the connector profile being created.
" + } + }, "connectionMode": { "target": "com.amazonaws.appflow#ConnectionMode", "traits": { @@ -847,7 +1311,7 @@ "min": 0, "max": 512 }, - "smithy.api#pattern": "^arn:aws:kms:.*:[0-9]+:" + "smithy.api#pattern": "^arn:aws:appflow:.*:[0-9]+:" } }, "com.amazonaws.appflow#ConnectorProfileConfig": { @@ -973,6 +1437,9 @@ }, "SAPOData": { "target": "com.amazonaws.appflow#SAPODataConnectorProfileCredentials" + }, + "CustomConnector": { + "target": "com.amazonaws.appflow#CustomConnectorProfileCredentials" } }, "traits": { @@ -1108,12 +1575,120 @@ }, "SAPOData": { "target": "com.amazonaws.appflow#SAPODataConnectorProfileProperties" + }, + "CustomConnector": { + "target": "com.amazonaws.appflow#CustomConnectorProfileProperties", + "traits": { + "smithy.api#documentation": "The properties required by the custom connector.
" + } } }, "traits": { "smithy.api#documentation": "The connector-specific profile properties required by each connector.
" } }, + "com.amazonaws.appflow#ConnectorProvisioningConfig": { + "type": "structure", + "members": { + "lambda": { + "target": "com.amazonaws.appflow#LambdaConnectorProvisioningConfig", + "traits": { + "smithy.api#documentation": "Contains information about the configuration of the lambda which is being registered as\n the connector.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Contains information about the configuration of the connector being registered.
" + } + }, + "com.amazonaws.appflow#ConnectorProvisioningType": { + "type": "string", + "traits": { + "smithy.api#documentation": "The type of provisioning that the connector supports, such as Lambda.
", + "smithy.api#enum": [ + { + "value": "LAMBDA", + "name": "LAMBDA" + } + ] + } + }, + "com.amazonaws.appflow#ConnectorRuntimeSetting": { + "type": "structure", + "members": { + "key": { + "target": "com.amazonaws.appflow#Key", + "traits": { + "smithy.api#documentation": "Contains value information about the connector runtime setting.
" + } + }, + "dataType": { + "target": "com.amazonaws.appflow#ConnectorRuntimeSettingDataType", + "traits": { + "smithy.api#documentation": "Data type of the connector runtime setting.
" + } + }, + "isRequired": { + "target": "com.amazonaws.appflow#Boolean", + "traits": { + "smithy.api#documentation": "Indicates whether this connector runtime setting is required.
" + } + }, + "label": { + "target": "com.amazonaws.appflow#Label", + "traits": { + "smithy.api#documentation": "A label used for connector runtime setting.
" + } + }, + "description": { + "target": "com.amazonaws.appflow#Description", + "traits": { + "smithy.api#documentation": "A description about the connector runtime setting.
" + } + }, + "scope": { + "target": "com.amazonaws.appflow#ConnectorRuntimeSettingScope", + "traits": { + "smithy.api#documentation": "Indicates the scope of the connector runtime setting.
" + } + }, + "connectorSuppliedValueOptions": { + "target": "com.amazonaws.appflow#ConnectorSuppliedValueOptionList", + "traits": { + "smithy.api#documentation": "Contains default values for the connector runtime setting that are supplied by the\n connector.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Contains information about the connector runtime settings that are required for flow\n execution.
" + } + }, + "com.amazonaws.appflow#ConnectorRuntimeSettingDataType": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 256 + }, + "smithy.api#pattern": "^\\S+$" + } + }, + "com.amazonaws.appflow#ConnectorRuntimeSettingList": { + "type": "list", + "member": { + "target": "com.amazonaws.appflow#ConnectorRuntimeSetting" + } + }, + "com.amazonaws.appflow#ConnectorRuntimeSettingScope": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 256 + }, + "smithy.api#pattern": "^\\S+$" + } + }, "com.amazonaws.appflow#ConnectorServerException": { "type": "structure", "members": { @@ -1127,6 +1702,28 @@ "smithy.api#httpError": 400 } }, + "com.amazonaws.appflow#ConnectorSuppliedValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 256 + }, + "smithy.api#pattern": "^\\S+$" + } + }, + "com.amazonaws.appflow#ConnectorSuppliedValueList": { + "type": "list", + "member": { + "target": "com.amazonaws.appflow#ConnectorSuppliedValue" + } + }, + "com.amazonaws.appflow#ConnectorSuppliedValueOptionList": { + "type": "list", + "member": { + "target": "com.amazonaws.appflow#ConnectorSuppliedValue" + } + }, "com.amazonaws.appflow#ConnectorType": { "type": "string", "traits": { @@ -1218,6 +1815,10 @@ { "value": "SAPOData", "name": "SAPODATA" + }, + { + "value": "CustomConnector", + "name": "CUSTOMCONNECTOR" } ] } @@ -1234,6 +1835,16 @@ } } }, + "com.amazonaws.appflow#ConnectorVersion": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 256 + }, + "smithy.api#pattern": "^\\S+$" + } + }, "com.amazonaws.appflow#CreateConnectorProfile": { "type": "operation", "input": { @@ -1260,7 +1871,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates a new connector profile associated with your Amazon Web Services account. There is a soft quota\n of 100 connector profiles per Amazon Web Services account. 
If you need more connector profiles than this quota\n allows, you can submit a request to the Amazon AppFlow team through the Amazon AppFlow support\n channel.
", + "smithy.api#documentation": "Creates a new connector profile associated with your Amazon Web Services account. There\n is a soft quota of 100 connector profiles per Amazon Web Services account. If you need more\n connector profiles than this quota allows, you can submit a request to the Amazon AppFlow team\n through the Amazon AppFlow support channel.
", "smithy.api#http": { "method": "POST", "uri": "/create-connector-profile", @@ -1291,154 +1902,405 @@ "smithy.api#required": {} } }, + "connectorLabel": { + "target": "com.amazonaws.appflow#ConnectorLabel", + "traits": { + "smithy.api#documentation": "The label of the connector. The label is unique for each\n ConnectorRegistration
in your Amazon Web Services account. Only needed if\n calling for CUSTOMCONNECTOR connector type.
Indicates the connection mode and specifies whether it is public or private. Private\n flows use Amazon Web Services PrivateLink to route data over Amazon Web Services infrastructure without exposing it to the\n public internet.
", - "smithy.api#required": {} + "smithy.api#documentation": "Indicates the connection mode and specifies whether it is public or private. Private\n flows use Amazon Web Services PrivateLink to route data over Amazon Web Services infrastructure\n without exposing it to the public internet.
", + "smithy.api#required": {} + } + }, + "connectorProfileConfig": { + "target": "com.amazonaws.appflow#ConnectorProfileConfig", + "traits": { + "smithy.api#documentation": "Defines the connector-specific configuration and credentials.
", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.appflow#CreateConnectorProfileResponse": { + "type": "structure", + "members": { + "connectorProfileArn": { + "target": "com.amazonaws.appflow#ConnectorProfileArn", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the connector profile.
" + } + } + } + }, + "com.amazonaws.appflow#CreateFlow": { + "type": "operation", + "input": { + "target": "com.amazonaws.appflow#CreateFlowRequest" + }, + "output": { + "target": "com.amazonaws.appflow#CreateFlowResponse" + }, + "errors": [ + { + "target": "com.amazonaws.appflow#ConflictException" + }, + { + "target": "com.amazonaws.appflow#ConnectorAuthenticationException" + }, + { + "target": "com.amazonaws.appflow#ConnectorServerException" + }, + { + "target": "com.amazonaws.appflow#InternalServerException" + }, + { + "target": "com.amazonaws.appflow#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.appflow#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.appflow#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "Enables your application to create a new flow using Amazon AppFlow. You must create a\n connector profile before calling this API. Please note that the Request Syntax below shows\n syntax for multiple destinations, however, you can only transfer data to one item in this list\n at a time. Amazon AppFlow does not currently support flows to multiple destinations at once.
", + "smithy.api#http": { + "method": "POST", + "uri": "/create-flow", + "code": 200 + } + } + }, + "com.amazonaws.appflow#CreateFlowRequest": { + "type": "structure", + "members": { + "flowName": { + "target": "com.amazonaws.appflow#FlowName", + "traits": { + "smithy.api#documentation": "The specified name of the flow. Spaces are not allowed. Use underscores (_) or hyphens\n (-) only.
", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.appflow#FlowDescription", + "traits": { + "smithy.api#documentation": "A description of the flow you want to create.
" + } + }, + "kmsArn": { + "target": "com.amazonaws.appflow#KMSArn", + "traits": { + "smithy.api#documentation": "The ARN (Amazon Resource Name) of the Key Management Service (KMS) key you provide for\n encryption. This is required if you do not want to use the Amazon AppFlow-managed KMS key. If\n you don't provide anything here, Amazon AppFlow uses the Amazon AppFlow-managed KMS key.\n
" + } + }, + "triggerConfig": { + "target": "com.amazonaws.appflow#TriggerConfig", + "traits": { + "smithy.api#documentation": "The trigger settings that determine how and when the flow runs.
", + "smithy.api#required": {} + } + }, + "sourceFlowConfig": { + "target": "com.amazonaws.appflow#SourceFlowConfig", + "traits": { + "smithy.api#documentation": "The configuration that controls how Amazon AppFlow retrieves data from the source\n connector.
", + "smithy.api#required": {} + } + }, + "destinationFlowConfigList": { + "target": "com.amazonaws.appflow#DestinationFlowConfigList", + "traits": { + "smithy.api#documentation": "The configuration that controls how Amazon AppFlow places data in the destination\n connector.
", + "smithy.api#required": {} + } + }, + "tasks": { + "target": "com.amazonaws.appflow#Tasks", + "traits": { + "smithy.api#documentation": "A list of tasks that Amazon AppFlow performs while transferring the data in the flow run.\n
", + "smithy.api#required": {} + } + }, + "tags": { + "target": "com.amazonaws.appflow#TagMap", + "traits": { + "smithy.api#documentation": "The tags used to organize, track, or control access for your flow.
" + } + } + } + }, + "com.amazonaws.appflow#CreateFlowResponse": { + "type": "structure", + "members": { + "flowArn": { + "target": "com.amazonaws.appflow#FlowArn", + "traits": { + "smithy.api#documentation": "The flow's Amazon Resource Name (ARN).
" + } + }, + "flowStatus": { + "target": "com.amazonaws.appflow#FlowStatus", + "traits": { + "smithy.api#documentation": "Indicates the current status of the flow.
" + } + } + } + }, + "com.amazonaws.appflow#CreatedBy": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 256 + }, + "smithy.api#pattern": "^\\S+$" + } + }, + "com.amazonaws.appflow#CredentialsMap": { + "type": "map", + "key": { + "target": "com.amazonaws.appflow#CredentialsMapKey" + }, + "value": { + "target": "com.amazonaws.appflow#CredentialsMapValue" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 50 + } + } + }, + "com.amazonaws.appflow#CredentialsMapKey": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + }, + "smithy.api#pattern": "^[\\w]+$", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.appflow#CredentialsMapValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 2048 + }, + "smithy.api#pattern": "^\\S+$", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.appflow#CustomAuthConfig": { + "type": "structure", + "members": { + "customAuthenticationType": { + "target": "com.amazonaws.appflow#CustomAuthenticationType", + "traits": { + "smithy.api#documentation": "The authentication type that the custom connector uses.
" } }, - "connectorProfileConfig": { - "target": "com.amazonaws.appflow#ConnectorProfileConfig", + "authParameters": { + "target": "com.amazonaws.appflow#AuthParameterList", "traits": { - "smithy.api#documentation": "Defines the connector-specific configuration and credentials.
", - "smithy.api#required": {} + "smithy.api#documentation": "Information about authentication parameters required for authentication.
" } } + }, + "traits": { + "smithy.api#documentation": "Configuration information required for custom authentication.
" } }, - "com.amazonaws.appflow#CreateConnectorProfileResponse": { + "com.amazonaws.appflow#CustomAuthConfigList": { + "type": "list", + "member": { + "target": "com.amazonaws.appflow#CustomAuthConfig" + } + }, + "com.amazonaws.appflow#CustomAuthCredentials": { "type": "structure", "members": { - "connectorProfileArn": { - "target": "com.amazonaws.appflow#ConnectorProfileArn", + "customAuthenticationType": { + "target": "com.amazonaws.appflow#CustomAuthenticationType", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the connector profile.
" + "smithy.api#documentation": "The custom authentication type that the connector uses.
", + "smithy.api#required": {} + } + }, + "credentialsMap": { + "target": "com.amazonaws.appflow#CredentialsMap", + "traits": { + "smithy.api#documentation": "A map that holds custom authentication credentials.
" } } + }, + "traits": { + "smithy.api#documentation": "The custom credentials required for custom authentication.
" } }, - "com.amazonaws.appflow#CreateFlow": { - "type": "operation", - "input": { - "target": "com.amazonaws.appflow#CreateFlowRequest" - }, - "output": { - "target": "com.amazonaws.appflow#CreateFlowResponse" - }, - "errors": [ - { - "target": "com.amazonaws.appflow#ConflictException" - }, - { - "target": "com.amazonaws.appflow#ConnectorAuthenticationException" - }, - { - "target": "com.amazonaws.appflow#ConnectorServerException" - }, - { - "target": "com.amazonaws.appflow#InternalServerException" - }, - { - "target": "com.amazonaws.appflow#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.appflow#ServiceQuotaExceededException" - }, - { - "target": "com.amazonaws.appflow#ValidationException" - } - ], + "com.amazonaws.appflow#CustomAuthenticationType": { + "type": "string", "traits": { - "smithy.api#documentation": "Enables your application to create a new flow using Amazon AppFlow. You must create a\n connector profile before calling this API. Please note that the Request Syntax below shows\n syntax for multiple destinations, however, you can only transfer data to one item in this list\n at a time. Amazon AppFlow does not currently support flows to multiple destinations at once.
", - "smithy.api#http": { - "method": "POST", - "uri": "/create-flow", - "code": 200 - } + "smithy.api#length": { + "min": 0, + "max": 256 + }, + "smithy.api#pattern": "^\\S+$" } }, - "com.amazonaws.appflow#CreateFlowRequest": { + "com.amazonaws.appflow#CustomConnectorDestinationProperties": { "type": "structure", "members": { - "flowName": { - "target": "com.amazonaws.appflow#FlowName", + "entityName": { + "target": "com.amazonaws.appflow#EntityName", "traits": { - "smithy.api#documentation": "The specified name of the flow. Spaces are not allowed. Use underscores (_) or hyphens\n (-) only.
", + "smithy.api#documentation": "The entity specified in the custom connector as a destination in the flow.
", "smithy.api#required": {} } }, - "description": { - "target": "com.amazonaws.appflow#FlowDescription", + "errorHandlingConfig": { + "target": "com.amazonaws.appflow#ErrorHandlingConfig", "traits": { - "smithy.api#documentation": "A description of the flow you want to create.
" + "smithy.api#documentation": "The settings that determine how Amazon AppFlow handles an error when placing data in the\n custom connector as destination.
" } }, - "kmsArn": { - "target": "com.amazonaws.appflow#KMSArn", + "writeOperationType": { + "target": "com.amazonaws.appflow#WriteOperationType", "traits": { - "smithy.api#documentation": "The ARN (Amazon Resource Name) of the Key Management Service (KMS) key you provide for\n encryption. This is required if you do not want to use the Amazon AppFlow-managed KMS key. If\n you don't provide anything here, Amazon AppFlow uses the Amazon AppFlow-managed KMS key.\n
" + "smithy.api#documentation": "Specifies the type of write operation to be performed in the custom connector when it's\n used as destination.
" } }, - "triggerConfig": { - "target": "com.amazonaws.appflow#TriggerConfig", + "idFieldNames": { + "target": "com.amazonaws.appflow#IdFieldNameList", "traits": { - "smithy.api#documentation": "The trigger settings that determine how and when the flow runs.
", - "smithy.api#required": {} + "smithy.api#documentation": "The name of the field that Amazon AppFlow uses as an ID when performing a write operation\n such as update, delete, or upsert.
" } }, - "sourceFlowConfig": { - "target": "com.amazonaws.appflow#SourceFlowConfig", + "customProperties": { + "target": "com.amazonaws.appflow#CustomProperties", "traits": { - "smithy.api#documentation": "The configuration that controls how Amazon AppFlow retrieves data from the source\n connector.
", + "smithy.api#documentation": "The custom properties that are specific to the connector when it's used as a destination\n in the flow.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The properties that are applied when the custom connector is being used as a\n destination.
" + } + }, + "com.amazonaws.appflow#CustomConnectorProfileCredentials": { + "type": "structure", + "members": { + "authenticationType": { + "target": "com.amazonaws.appflow#AuthenticationType", + "traits": { + "smithy.api#documentation": "The authentication type that the custom connector uses for authenticating while creating a\n connector profile.
", "smithy.api#required": {} } }, - "destinationFlowConfigList": { - "target": "com.amazonaws.appflow#DestinationFlowConfigList", + "basic": { + "target": "com.amazonaws.appflow#BasicAuthCredentials", "traits": { - "smithy.api#documentation": "The configuration that controls how Amazon AppFlow places data in the destination\n connector.
", - "smithy.api#required": {} + "smithy.api#documentation": "The basic credentials that are required for the authentication of the user.
" } }, - "tasks": { - "target": "com.amazonaws.appflow#Tasks", + "oauth2": { + "target": "com.amazonaws.appflow#OAuth2Credentials", "traits": { - "smithy.api#documentation": "A list of tasks that Amazon AppFlow performs while transferring the data in the flow run.\n
", - "smithy.api#required": {} + "smithy.api#documentation": "The OAuth 2.0 credentials required for the authentication of the user.
" } }, - "tags": { - "target": "com.amazonaws.appflow#TagMap", + "apiKey": { + "target": "com.amazonaws.appflow#ApiKeyCredentials", "traits": { - "smithy.api#documentation": "The tags used to organize, track, or control access for your flow.
" + "smithy.api#documentation": "The API keys required for the authentication of the user.
" + } + }, + "custom": { + "target": "com.amazonaws.appflow#CustomAuthCredentials", + "traits": { + "smithy.api#documentation": "If the connector uses the custom authentication mechanism, this holds the required\n credentials.
" } } + }, + "traits": { + "smithy.api#documentation": "The connector-specific profile credentials that are required when using the custom\n connector.
" } }, - "com.amazonaws.appflow#CreateFlowResponse": { + "com.amazonaws.appflow#CustomConnectorProfileProperties": { "type": "structure", "members": { - "flowArn": { - "target": "com.amazonaws.appflow#FlowArn", + "profileProperties": { + "target": "com.amazonaws.appflow#ProfilePropertiesMap", "traits": { - "smithy.api#documentation": "The flow's Amazon Resource Name (ARN).
" + "smithy.api#documentation": "A map of properties that are required to create a profile for the custom connector.
" } }, - "flowStatus": { - "target": "com.amazonaws.appflow#FlowStatus", + "oAuth2Properties": { + "target": "com.amazonaws.appflow#OAuth2Properties" + } + }, + "traits": { + "smithy.api#documentation": "The profile properties required by the custom connector.
" + } + }, + "com.amazonaws.appflow#CustomConnectorSourceProperties": { + "type": "structure", + "members": { + "entityName": { + "target": "com.amazonaws.appflow#EntityName", "traits": { - "smithy.api#documentation": "Indicates the current status of the flow.
" + "smithy.api#documentation": "The entity specified in the custom connector as a source in the flow.
", + "smithy.api#required": {} + } + }, + "customProperties": { + "target": "com.amazonaws.appflow#CustomProperties", + "traits": { + "smithy.api#documentation": "Custom properties that are required to use the custom connector as a source.
" } } + }, + "traits": { + "smithy.api#documentation": "The properties that are applied when the custom connector is being used as a\n source.
" } }, - "com.amazonaws.appflow#CreatedBy": { + "com.amazonaws.appflow#CustomProperties": { + "type": "map", + "key": { + "target": "com.amazonaws.appflow#CustomPropertyKey" + }, + "value": { + "target": "com.amazonaws.appflow#CustomPropertyValue" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 50 + } + } + }, + "com.amazonaws.appflow#CustomPropertyKey": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + }, + "smithy.api#pattern": "^[\\w]+$" + } + }, + "com.amazonaws.appflow#CustomPropertyValue": { "type": "string", "traits": { "smithy.api#length": { "min": 0, - "max": 256 + "max": 2048 }, "smithy.api#pattern": "^\\S+$" } @@ -1735,6 +2597,34 @@ "type": "structure", "members": {} }, + "com.amazonaws.appflow#DescribeConnector": { + "type": "operation", + "input": { + "target": "com.amazonaws.appflow#DescribeConnectorRequest" + }, + "output": { + "target": "com.amazonaws.appflow#DescribeConnectorResponse" + }, + "errors": [ + { + "target": "com.amazonaws.appflow#InternalServerException" + }, + { + "target": "com.amazonaws.appflow#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.appflow#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "Describes the given custom connector registered in your Amazon Web Services account. This\n API can be used for custom connectors that are registered in your account and also for Amazon\n authored connectors.
", + "smithy.api#http": { + "method": "POST", + "uri": "/describe-connector", + "code": 200 + } + } + }, "com.amazonaws.appflow#DescribeConnectorEntity": { "type": "operation", "input": { @@ -1773,7 +2663,7 @@ "type": "structure", "members": { "connectorEntityName": { - "target": "com.amazonaws.appflow#Name", + "target": "com.amazonaws.appflow#EntityName", "traits": { "smithy.api#documentation": "The entity name for that connector.
", "smithy.api#required": {} @@ -1790,6 +2680,12 @@ "traits": { "smithy.api#documentation": " The name of the connector profile. The name is unique for each\n ConnectorProfile
in the Amazon Web Services account.
The version of the API that's used by the connector.
" + } } } }, @@ -1850,6 +2746,12 @@ "smithy.api#documentation": "The type of connector, such as Salesforce, Amplitude, and so on.
" } }, + "connectorLabel": { + "target": "com.amazonaws.appflow#ConnectorLabel", + "traits": { + "smithy.api#documentation": "The name of the connector. The name is unique for each ConnectorRegistration
\n in your Amazon Web Services account. Only needed if calling for CUSTOMCONNECTOR connector\n type.
The connector type, such as CUSTOMCONNECTOR, Saleforce, Marketo. Please choose\n CUSTOMCONNECTOR for Lambda based custom connectors.
", + "smithy.api#required": {} + } + }, + "connectorLabel": { + "target": "com.amazonaws.appflow#ConnectorLabel", + "traits": { + "smithy.api#documentation": "The label of the connector. The label is unique for each\n ConnectorRegistration
in your Amazon Web Services account. Only needed if\n calling for CUSTOMCONNECTOR connector type.
Configuration info of all the connectors that the user requested.
" + } + } + } + }, "com.amazonaws.appflow#DescribeConnectors": { "type": "operation", "input": { @@ -1906,7 +2837,8 @@ }, "smithy.api#paginated": { "inputToken": "nextToken", - "outputToken": "nextToken" + "outputToken": "nextToken", + "pageSize": "maxResults" } } }, @@ -1919,6 +2851,12 @@ "smithy.api#documentation": "The type of connector, such as Salesforce, Amplitude, and so on.
" } }, + "maxResults": { + "target": "com.amazonaws.appflow#MaxResults", + "traits": { + "smithy.api#documentation": "The maximum number of items that should be returned in the result set. The default is\n 20.
" + } + }, "nextToken": { "target": "com.amazonaws.appflow#NextToken", "traits": { @@ -1936,6 +2874,12 @@ "smithy.api#documentation": "The configuration that is applied to the connectors used in the flow.
" } }, + "connectors": { + "target": "com.amazonaws.appflow#ConnectorList", + "traits": { + "smithy.api#documentation": "Information about the connectors supported in Amazon AppFlow.
" + } + }, "nextToken": { "target": "com.amazonaws.appflow#NextToken", "traits": { @@ -2228,6 +3172,12 @@ "traits": { "smithy.api#documentation": "The properties required to query Zendesk.
" } + }, + "CustomConnector": { + "target": "com.amazonaws.appflow#CustomConnectorDestinationProperties", + "traits": { + "smithy.api#documentation": "The properties that are required to query the custom Connector.
" + } } }, "traits": { @@ -2271,6 +3221,12 @@ "smithy.api#documentation": " Specifies whether the field can be updated during an UPDATE
or\n UPSERT
write operation.
Specifies whether the field can use the default value during a Create operation.
" + } + }, "supportedWriteOperations": { "target": "com.amazonaws.appflow#SupportedWriteOperationList", "traits": { @@ -2292,6 +3248,12 @@ "smithy.api#required": {} } }, + "apiVersion": { + "target": "com.amazonaws.appflow#ApiVersion", + "traits": { + "smithy.api#documentation": "The API version that the destination connector uses.
" + } + }, "connectorProfileName": { "target": "com.amazonaws.appflow#ConnectorProfileName", "traits": { @@ -2336,6 +3298,9 @@ "smithy.api#pattern": "^\\S+$" } }, + "com.amazonaws.appflow#Double": { + "type": "double" + }, "com.amazonaws.appflow#DynatraceConnectorOperator": { "type": "string", "traits": { @@ -2465,6 +3430,16 @@ "smithy.api#pattern": "^[\\s\\w/!@#+=.-]*$" } }, + "com.amazonaws.appflow#EntityName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 1024 + }, + "smithy.api#pattern": "^\\S+$" + } + }, "com.amazonaws.appflow#ErrorHandlingConfig": { "type": "structure", "members": { @@ -2706,17 +3681,41 @@ "smithy.api#required": {} } }, - "filterOperators": { - "target": "com.amazonaws.appflow#FilterOperatorList", + "filterOperators": { + "target": "com.amazonaws.appflow#FilterOperatorList", + "traits": { + "smithy.api#documentation": "The list of operators supported by a field.
", + "smithy.api#required": {} + } + }, + "supportedValues": { + "target": "com.amazonaws.appflow#SupportedValueList", + "traits": { + "smithy.api#documentation": " The list of values that a field can contain. For example, a Boolean\n fieldType
can have two values: \"true\" and \"false\".
The regular expression pattern for the field name.
" + } + }, + "supportedDateFormat": { + "target": "com.amazonaws.appflow#String", + "traits": { + "smithy.api#documentation": "The date format that the field supports.
" + } + }, + "fieldValueRange": { + "target": "com.amazonaws.appflow#Range", "traits": { - "smithy.api#documentation": "The list of operators supported by a field.
", - "smithy.api#required": {} + "smithy.api#documentation": "The range of values this field can hold.
" } }, - "supportedValues": { - "target": "com.amazonaws.appflow#SupportedValueList", + "fieldLengthRange": { + "target": "com.amazonaws.appflow#Range", "traits": { - "smithy.api#documentation": " The list of values that a field can contain. For example, a Boolean\n fieldType
can have two values: \"true\" and \"false\".
This is the allowable length range for this field's value.
" } } }, @@ -2792,12 +3791,24 @@ "smithy.api#documentation": "Specifies the source connector type, such as Salesforce, Amazon S3, Amplitude, and so on.\n
" } }, + "sourceConnectorLabel": { + "target": "com.amazonaws.appflow#ConnectorLabel", + "traits": { + "smithy.api#documentation": "The label of the source connector in the flow.
" + } + }, "destinationConnectorType": { "target": "com.amazonaws.appflow#ConnectorType", "traits": { "smithy.api#documentation": "Specifies the destination connector type, such as Salesforce, Amazon S3, Amplitude, and\n so on.
" } }, + "destinationConnectorLabel": { + "target": "com.amazonaws.appflow#ConnectorLabel", + "traits": { + "smithy.api#documentation": "The label of the destination connector in the flow.
" + } + }, "triggerType": { "target": "com.amazonaws.appflow#TriggerType", "traits": { @@ -3089,8 +4100,7 @@ "traits": { "smithy.api#documentation": "A list of field names that can be used as an ID field when performing a write operation.\n
", "smithy.api#length": { - "min": 0, - "max": 1 + "min": 0 } } }, @@ -3311,6 +4321,21 @@ "smithy.api#pattern": ".*" } }, + "com.amazonaws.appflow#LambdaConnectorProvisioningConfig": { + "type": "structure", + "members": { + "lambdaArn": { + "target": "com.amazonaws.appflow#ARN", + "traits": { + "smithy.api#documentation": "Lambda ARN of the connector being registered.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Contains information about the configuration of the lambda which is being registered as\n the connector.
" + } + }, "com.amazonaws.appflow#ListConnectorEntities": { "type": "operation", "input": { @@ -3351,7 +4376,7 @@ "connectorProfileName": { "target": "com.amazonaws.appflow#ConnectorProfileName", "traits": { - "smithy.api#documentation": " The name of the connector profile. The name is unique for each\n ConnectorProfile
in the Amazon Web Services account, and is used to query the downstream\n connector.
The name of the connector profile. The name is unique for each\n ConnectorProfile
in the Amazon Web Services account, and is used to query the\n downstream connector.
This optional parameter is specific to connector implementation. Some connectors support\n multiple levels or categories of entities. You can find out the list of roots for such\n providers by sending a request without the entitiesPath
parameter. If the\n connector supports entities at different roots, this initial request returns the list of\n roots. Otherwise, this request returns all entities supported by the provider.
The version of the API that's used by the connector.
" + } } } }, @@ -3380,6 +4411,70 @@ } } }, + "com.amazonaws.appflow#ListConnectors": { + "type": "operation", + "input": { + "target": "com.amazonaws.appflow#ListConnectorsRequest" + }, + "output": { + "target": "com.amazonaws.appflow#ListConnectorsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.appflow#InternalServerException" + }, + { + "target": "com.amazonaws.appflow#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "Returns the list of all registered custom connectors in your Amazon Web Services account.\n This API lists only custom connectors registered in this account, not the Amazon Web Services\n authored connectors.
", + "smithy.api#http": { + "method": "POST", + "uri": "/list-connectors", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults" + } + } + }, + "com.amazonaws.appflow#ListConnectorsRequest": { + "type": "structure", + "members": { + "maxResults": { + "target": "com.amazonaws.appflow#MaxResults", + "traits": { + "smithy.api#documentation": "Specifies the maximum number of items that should be returned in the result set. The\n default for maxResults
is 20 (for all paginated API operations).
The pagination token for the next page of data.
" + } + } + } + }, + "com.amazonaws.appflow#ListConnectorsResponse": { + "type": "structure", + "members": { + "connectors": { + "target": "com.amazonaws.appflow#ConnectorList", + "traits": { + "smithy.api#documentation": "Contains information about the connectors supported by Amazon AppFlow.
" + } + }, + "nextToken": { + "target": "com.amazonaws.appflow#NextToken", + "traits": { + "smithy.api#documentation": "The pagination token for the next page of data. If nextToken=null, this means that all\n records have been fetched.
" + } + } + } + }, "com.amazonaws.appflow#ListFlows": { "type": "operation", "input": { @@ -3496,6 +4591,16 @@ } } }, + "com.amazonaws.appflow#LogoURL": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 256 + }, + "smithy.api#pattern": "^(https?|ftp|file)://[-a-zA-Z0-9+&@#/%?=~_|!:,.;]*[-a-zA-Z0-9+&@#/%=~_|]$" + } + }, "com.amazonaws.appflow#LogonLanguage": { "type": "string", "traits": { @@ -3701,6 +4806,116 @@ "smithy.api#pattern": "^\\S+$" } }, + "com.amazonaws.appflow#OAuth2Credentials": { + "type": "structure", + "members": { + "clientId": { + "target": "com.amazonaws.appflow#ClientId", + "traits": { + "smithy.api#documentation": "The identifier for the desired client.
" + } + }, + "clientSecret": { + "target": "com.amazonaws.appflow#ClientSecret", + "traits": { + "smithy.api#documentation": "The client secret used by the OAuth client to authenticate to the authorization\n server.
" + } + }, + "accessToken": { + "target": "com.amazonaws.appflow#AccessToken", + "traits": { + "smithy.api#documentation": "The access token used to access the connector on your behalf.
" + } + }, + "refreshToken": { + "target": "com.amazonaws.appflow#RefreshToken", + "traits": { + "smithy.api#documentation": "The refresh token used to refresh an expired access token.
" + } + }, + "oAuthRequest": { + "target": "com.amazonaws.appflow#ConnectorOAuthRequest" + } + }, + "traits": { + "smithy.api#documentation": "The OAuth 2.0 credentials required for OAuth 2.0 authentication.
" + } + }, + "com.amazonaws.appflow#OAuth2Defaults": { + "type": "structure", + "members": { + "oauthScopes": { + "target": "com.amazonaws.appflow#OAuthScopeList", + "traits": { + "smithy.api#documentation": "OAuth 2.0 scopes that the connector supports.
" + } + }, + "tokenUrls": { + "target": "com.amazonaws.appflow#TokenUrlList", + "traits": { + "smithy.api#documentation": "Token URLs that can be used for OAuth 2.0 authentication.
" + } + }, + "authCodeUrls": { + "target": "com.amazonaws.appflow#AuthCodeUrlList", + "traits": { + "smithy.api#documentation": "Auth code URLs that can be used for OAuth 2.0 authentication.
" + } + }, + "oauth2GrantTypesSupported": { + "target": "com.amazonaws.appflow#OAuth2GrantTypeSupportedList", + "traits": { + "smithy.api#documentation": "OAuth 2.0 grant types supported by the connector.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Contains the default values required for OAuth 2.0 authentication.
" + } + }, + "com.amazonaws.appflow#OAuth2GrantType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "CLIENT_CREDENTIALS", + "name": "CLIENT_CREDENTIALS" + }, + { + "value": "AUTHORIZATION_CODE", + "name": "AUTHORIZATION_CODE" + } + ] + } + }, + "com.amazonaws.appflow#OAuth2GrantTypeSupportedList": { + "type": "list", + "member": { + "target": "com.amazonaws.appflow#OAuth2GrantType" + } + }, + "com.amazonaws.appflow#OAuth2Properties": { + "type": "structure", + "members": { + "tokenUrl": { + "target": "com.amazonaws.appflow#TokenUrl", + "traits": { + "smithy.api#documentation": "The token URL required for OAuth 2.0 authentication.
", + "smithy.api#required": {} + } + }, + "oAuth2GrantType": { + "target": "com.amazonaws.appflow#OAuth2GrantType", + "traits": { + "smithy.api#documentation": "The OAuth 2.0 grant type used by connector for OAuth 2.0 authentication.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "The OAuth 2.0 properties required for OAuth 2.0 authentication.
" + } + }, "com.amazonaws.appflow#OAuthCredentials": { "type": "structure", "members": { @@ -3747,14 +4962,14 @@ "tokenUrl": { "target": "com.amazonaws.appflow#TokenUrl", "traits": { - "smithy.api#documentation": "The token url required to fetch access/refresh tokens using authorization code and also to refresh expired\n access token using refresh token.
", + "smithy.api#documentation": "The token url required to fetch access/refresh tokens using authorization code and also\n to refresh expired access token using refresh token.
", "smithy.api#required": {} } }, "authCodeUrl": { "target": "com.amazonaws.appflow#AuthCodeUrl", "traits": { - "smithy.api#documentation": "The authorization code url required to redirect to SAP Login Page to fetch authorization code for OAuth type\n authentication.
", + "smithy.api#documentation": "The authorization code url required to redirect to SAP Login Page to fetch authorization\n code for OAuth type authentication.
", "smithy.api#required": {} } }, @@ -3777,7 +4992,7 @@ "min": 0, "max": 128 }, - "smithy.api#pattern": "^[/\\w]*$" + "smithy.api#pattern": "^\\S+$" } }, "com.amazonaws.appflow#OAuthScopeList": { @@ -3942,24 +5157,115 @@ "name": "MASK_LENGTH" }, { - "value": "TRUNCATE_LENGTH", - "name": "TRUNCATE_LENGTH" + "value": "TRUNCATE_LENGTH", + "name": "TRUNCATE_LENGTH" + }, + { + "value": "MATH_OPERATION_FIELDS_ORDER", + "name": "MATH_OPERATION_FIELDS_ORDER" + }, + { + "value": "CONCAT_FORMAT", + "name": "CONCAT_FORMAT" + }, + { + "value": "SUBFIELD_CATEGORY_MAP", + "name": "SUBFIELD_CATEGORY_MAP" + }, + { + "value": "EXCLUDE_SOURCE_FIELDS_LIST", + "name": "EXCLUDE_SOURCE_FIELDS_LIST" + } + ] + } + }, + "com.amazonaws.appflow#Operators": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "PROJECTION", + "name": "PROJECTION" + }, + { + "value": "LESS_THAN", + "name": "LESS_THAN" + }, + { + "value": "GREATER_THAN", + "name": "GREATER_THAN" + }, + { + "value": "CONTAINS", + "name": "CONTAINS" + }, + { + "value": "BETWEEN", + "name": "BETWEEN" + }, + { + "value": "LESS_THAN_OR_EQUAL_TO", + "name": "LESS_THAN_OR_EQUAL_TO" + }, + { + "value": "GREATER_THAN_OR_EQUAL_TO", + "name": "GREATER_THAN_OR_EQUAL_TO" + }, + { + "value": "EQUAL_TO", + "name": "EQUAL_TO" + }, + { + "value": "NOT_EQUAL_TO", + "name": "NOT_EQUAL_TO" + }, + { + "value": "ADDITION", + "name": "ADDITION" + }, + { + "value": "MULTIPLICATION", + "name": "MULTIPLICATION" + }, + { + "value": "DIVISION", + "name": "DIVISION" + }, + { + "value": "SUBTRACTION", + "name": "SUBTRACTION" + }, + { + "value": "MASK_ALL", + "name": "MASK_ALL" + }, + { + "value": "MASK_FIRST_N", + "name": "MASK_FIRST_N" + }, + { + "value": "MASK_LAST_N", + "name": "MASK_LAST_N" + }, + { + "value": "VALIDATE_NON_NULL", + "name": "VALIDATE_NON_NULL" }, { - "value": "MATH_OPERATION_FIELDS_ORDER", - "name": "MATH_OPERATION_FIELDS_ORDER" + "value": "VALIDATE_NON_ZERO", + "name": "VALIDATE_NON_ZERO" }, { - "value": 
"CONCAT_FORMAT", - "name": "CONCAT_FORMAT" + "value": "VALIDATE_NON_NEGATIVE", + "name": "VALIDATE_NON_NEGATIVE" }, { - "value": "SUBFIELD_CATEGORY_MAP", - "name": "SUBFIELD_CATEGORY_MAP" + "value": "VALIDATE_NUMERIC", + "name": "VALIDATE_NUMERIC" }, { - "value": "EXCLUDE_SOURCE_FIELDS_LIST", - "name": "EXCLUDE_SOURCE_FIELDS_LIST" + "value": "NO_OP", + "name": "NO_OP" } ] } @@ -4142,6 +5448,41 @@ "smithy.api#pattern": "^$|com.amazonaws.vpce.[\\w/!:@#.\\-]+$" } }, + "com.amazonaws.appflow#ProfilePropertiesMap": { + "type": "map", + "key": { + "target": "com.amazonaws.appflow#ProfilePropertyKey" + }, + "value": { + "target": "com.amazonaws.appflow#ProfilePropertyValue" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 50 + } + } + }, + "com.amazonaws.appflow#ProfilePropertyKey": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + }, + "smithy.api#pattern": "^[\\w]+$" + } + }, + "com.amazonaws.appflow#ProfilePropertyValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 2048 + }, + "smithy.api#pattern": "^\\S+$" + } + }, "com.amazonaws.appflow#Property": { "type": "string", "traits": { @@ -4152,6 +5493,26 @@ "smithy.api#pattern": "^.+$" } }, + "com.amazonaws.appflow#Range": { + "type": "structure", + "members": { + "maximum": { + "target": "com.amazonaws.appflow#Double", + "traits": { + "smithy.api#documentation": "Maximum value supported by the field.
" + } + }, + "minimum": { + "target": "com.amazonaws.appflow#Double", + "traits": { + "smithy.api#documentation": "Minimum value supported by the field.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The range of values that the property supports.
" + } + }, "com.amazonaws.appflow#RedirectUri": { "type": "string", "traits": { @@ -4265,7 +5626,7 @@ "traits": { "smithy.api#length": { "min": 0, - "max": 512 + "max": 1024 }, "smithy.api#pattern": "^\\S+$" } @@ -4286,6 +5647,102 @@ "target": "com.amazonaws.appflow#Region" } }, + "com.amazonaws.appflow#RegisterConnector": { + "type": "operation", + "input": { + "target": "com.amazonaws.appflow#RegisterConnectorRequest" + }, + "output": { + "target": "com.amazonaws.appflow#RegisterConnectorResponse" + }, + "errors": [ + { + "target": "com.amazonaws.appflow#AccessDeniedException" + }, + { + "target": "com.amazonaws.appflow#ConflictException" + }, + { + "target": "com.amazonaws.appflow#ConnectorAuthenticationException" + }, + { + "target": "com.amazonaws.appflow#ConnectorServerException" + }, + { + "target": "com.amazonaws.appflow#InternalServerException" + }, + { + "target": "com.amazonaws.appflow#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.appflow#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.appflow#ThrottlingException" + }, + { + "target": "com.amazonaws.appflow#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "Registers a new connector with your Amazon Web Services account. Before you can register\n the connector, you must deploy lambda in your account.
", + "smithy.api#http": { + "method": "POST", + "uri": "/register-connector", + "code": 200 + } + } + }, + "com.amazonaws.appflow#RegisterConnectorRequest": { + "type": "structure", + "members": { + "connectorLabel": { + "target": "com.amazonaws.appflow#ConnectorLabel", + "traits": { + "smithy.api#documentation": " The name of the connector. The name is unique for each ConnectorRegistration
\n in your Amazon Web Services account.
A description about the connector that's being registered.
" + } + }, + "connectorProvisioningType": { + "target": "com.amazonaws.appflow#ConnectorProvisioningType", + "traits": { + "smithy.api#documentation": "The provisioning type of the connector. Currently the only supported value is LAMBDA.\n
" + } + }, + "connectorProvisioningConfig": { + "target": "com.amazonaws.appflow#ConnectorProvisioningConfig", + "traits": { + "smithy.api#documentation": "The provisioning type of the connector. Currently the only supported value is\n LAMBDA.
" + } + } + } + }, + "com.amazonaws.appflow#RegisterConnectorResponse": { + "type": "structure", + "members": { + "connectorArn": { + "target": "com.amazonaws.appflow#ARN", + "traits": { + "smithy.api#documentation": "The ARN of the connector being registered.
" + } + } + } + }, + "com.amazonaws.appflow#RegisteredBy": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 512 + }, + "smithy.api#pattern": "^\\S+$" + } + }, "com.amazonaws.appflow#ResourceNotFoundException": { "type": "structure", "members": { @@ -4909,6 +6366,21 @@ }, "com.amazonaws.appflow#SandstoneConfigurationServiceLambda": { "type": "service", + "traits": { + "aws.api#service": { + "sdkId": "Appflow", + "arnNamespace": "appflow", + "cloudFormationName": "Appflow", + "cloudTrailEventSource": "appflow.amazonaws.com", + "endpointPrefix": "appflow" + }, + "aws.auth#sigv4": { + "name": "appflow" + }, + "aws.protocols#restJson1": {}, + "smithy.api#documentation": "Welcome to the Amazon AppFlow API reference. This guide is for developers who need\n detailed information about the Amazon AppFlow API operations, data types, and errors.
\n\nAmazon AppFlow is a fully managed integration service that enables you to securely\n transfer data between software as a service (SaaS) applications like Salesforce, Marketo,\n Slack, and ServiceNow, and Amazon Web Services like Amazon S3 and Amazon Redshift.
\n\n\n\nUse the following links to get started on the Amazon AppFlow API:
\n\n\n Actions: An alphabetical list of all Amazon AppFlow API operations.
\n\n Data\n types: An alphabetical list of all Amazon AppFlow data types.
\n\n Common parameters: Parameters that all Query operations can use.
\n\n Common\n errors: Client and server errors that all operations can return.
\nIf you're new to Amazon AppFlow, we recommend that you review the Amazon AppFlow User\n Guide.
\nAmazon AppFlow API users can use vendor-specific mechanisms for OAuth, and include\n applicable OAuth attributes (such as auth-code
and redirecturi
) with\n the connector-specific ConnectorProfileProperties
when creating a new connector\n profile using Amazon AppFlow API operations. For example, Salesforce users can refer to the\n \n Authorize Apps with OAuth\n documentation.
Welcome to the Amazon AppFlow API reference. This guide is for developers who need\n detailed information about the Amazon AppFlow API operations, data types, and errors.
\n\nAmazon AppFlow is a fully managed integration service that enables you to securely\n transfer data between software as a service (SaaS) applications like Salesforce, Marketo,\n Slack, and ServiceNow, and Amazon Web Services like Amazon S3 and Amazon Redshift.
\n\n\n\nUse the following links to get started on the Amazon AppFlow API:
\n\n\n Actions: An alphabetical list of all Amazon AppFlow API operations.
\n\n Data\n types: An alphabetical list of all Amazon AppFlow data types.
\n\n Common parameters: Parameters that all Query operations can use.
\n\n Common\n errors: Client and server errors that all operations can return.
\nIf you're new to Amazon AppFlow, we recommend that you review the Amazon AppFlow User\n Guide.
\nAmazon AppFlow API users can use vendor-specific mechanisms for OAuth, and include\n applicable OAuth attributes (such as auth-code
and redirecturi
) with\n the connector-specific ConnectorProfileProperties
when creating a new connector\n profile using Amazon AppFlow API operations. For example, Salesforce users can refer to the\n \n Authorize Apps with OAuth\n documentation.
Indicates if the field can be queried.
" } + }, + "isTimestampFieldForIncrementalQueries": { + "target": "com.amazonaws.appflow#Boolean", + "traits": { + "smithy.api#documentation": "Indicates if this timestamp field can be used for incremental queries.
" + } } }, "traits": { @@ -5786,6 +7264,12 @@ "smithy.api#required": {} } }, + "apiVersion": { + "target": "com.amazonaws.appflow#ApiVersion", + "traits": { + "smithy.api#documentation": "The API version of the connector when it's used as a source in the flow.
" + } + }, "connectorProfileName": { "target": "com.amazonaws.appflow#ConnectorProfileName", "traits": { @@ -5956,6 +7440,22 @@ "smithy.api#pattern": ".*" } }, + "com.amazonaws.appflow#SupportedApiVersion": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 256 + }, + "smithy.api#pattern": "^\\S+$" + } + }, + "com.amazonaws.appflow#SupportedApiVersionList": { + "type": "list", + "member": { + "target": "com.amazonaws.appflow#SupportedApiVersion" + } + }, "com.amazonaws.appflow#SupportedFieldTypeDetails": { "type": "structure", "members": { @@ -5971,6 +7471,12 @@ "smithy.api#documentation": " Contains details regarding all the supported FieldTypes
and their\n corresponding filterOperators
and supportedValues
.
API calls have exceeded the maximum allowed API request rate per account and per Region.\n
", + "smithy.api#error": "client", + "smithy.api#httpError": 429 + } + }, "com.amazonaws.appflow#Timezone": { "type": "string", "traits": { @@ -6196,6 +7715,12 @@ "smithy.api#pattern": "^(https?)://[-a-zA-Z0-9+&@#/%?=~_|!:,.;]*[-a-zA-Z0-9+&@#/%=~_|]$" } }, + "com.amazonaws.appflow#TokenUrlList": { + "type": "list", + "member": { + "target": "com.amazonaws.appflow#TokenUrl" + } + }, "com.amazonaws.appflow#TrendmicroConnectorOperator": { "type": "string", "traits": { @@ -6363,6 +7888,56 @@ "target": "com.amazonaws.appflow#TriggerType" } }, + "com.amazonaws.appflow#UnregisterConnector": { + "type": "operation", + "input": { + "target": "com.amazonaws.appflow#UnregisterConnectorRequest" + }, + "output": { + "target": "com.amazonaws.appflow#UnregisterConnectorResponse" + }, + "errors": [ + { + "target": "com.amazonaws.appflow#ConflictException" + }, + { + "target": "com.amazonaws.appflow#InternalServerException" + }, + { + "target": "com.amazonaws.appflow#ResourceNotFoundException" + } + ], + "traits": { + "smithy.api#documentation": "Unregisters the custom connector registered in your account that matches the\n connectorLabel provided in the request.
", + "smithy.api#http": { + "method": "POST", + "uri": "/unregister-connector", + "code": 200 + } + } + }, + "com.amazonaws.appflow#UnregisterConnectorRequest": { + "type": "structure", + "members": { + "connectorLabel": { + "target": "com.amazonaws.appflow#ConnectorLabel", + "traits": { + "smithy.api#documentation": "The label of the connector. The label is unique for each\n ConnectorRegistration
in your Amazon Web Services account.
Indicates whether Amazon AppFlow should unregister the connector, even if it is currently\n in use in one or more connector profiles. The default value is false.
" + } + } + } + }, + "com.amazonaws.appflow#UnregisterConnectorResponse": { + "type": "structure", + "members": {} + }, "com.amazonaws.appflow#UnsupportedOperationException": { "type": "structure", "members": { @@ -6907,6 +8482,10 @@ { "value": "UPDATE", "name": "UPDATE" + }, + { + "value": "DELETE", + "name": "DELETE" } ] } diff --git a/codegen/sdk-codegen/aws-models/athena.json b/codegen/sdk-codegen/aws-models/athena.json index 604b62c72ba87..7781e7a6bd535 100644 --- a/codegen/sdk-codegen/aws-models/athena.json +++ b/codegen/sdk-codegen/aws-models/athena.json @@ -1353,7 +1353,7 @@ } ], "traits": { - "smithy.api#documentation": "Streams the results of a single query execution specified by\n QueryExecutionId
from the Athena query results location in\n Amazon S3. For more information, see Query Results in the Amazon Athena User Guide. This request does not execute the query\n but returns results. Use StartQueryExecution to run a query.
To stream query results successfully, the IAM principal with permission to call\n GetQueryResults
also must have permissions to the Amazon S3\n GetObject
action for the Athena query results location.
IAM principals with permission to the Amazon S3\n GetObject
action for the query results location are able to retrieve\n query results from Amazon S3 even if permission to the\n GetQueryResults
action is denied. To restrict user or role access,\n ensure that Amazon S3 permissions to the Athena query location\n are denied.
Streams the results of a single query execution specified by\n QueryExecutionId
from the Athena query results location in\n Amazon S3. For more information, see Query Results in the Amazon Athena User Guide. This request does not execute the query\n but returns results. Use StartQueryExecution to run a query.
If the original query execution ran using an ResultConfiguration$ExpectedBucketOwner setting, the setting also\n applies to Amazon S3 read operations when GetQueryResults
is\n called. If an expected bucket owner has been specified and the query results are in an\n Amazon S3 bucket whose owner account ID is different from the expected\n bucket owner, the GetQueryResults
call fails with an Amazon S3\n permissions error.
To stream query results successfully, the IAM principal with permission to call\n GetQueryResults
also must have permissions to the Amazon S3\n GetObject
action for the Athena query results location.
IAM principals with permission to the Amazon S3\n GetObject
action for the query results location are able to retrieve\n query results from Amazon S3 even if permission to the\n GetQueryResults
action is denied. To restrict user or role access,\n ensure that Amazon S3 permissions to the Athena query location\n are denied.
If query results are encrypted in Amazon S3, indicates the encryption option\n used (for example, SSE-KMS
or CSE-KMS
) and key information.\n This is a client-side setting. If workgroup settings override client-side settings, then\n the query uses the encryption configuration that is specified for the workgroup, and\n also uses the location for storing query results specified in the workgroup. See WorkGroupConfiguration$EnforceWorkGroupConfiguration and Workgroup Settings Override Client-Side Settings.
The Amazon Web Services account ID that you expect to be the owner of the Amazon S3 bucket specified by ResultConfiguration$OutputLocation.\n If set, Athena uses the value for ExpectedBucketOwner
when it\n makes Amazon S3 calls to your specified output location. If the\n ExpectedBucketOwner
\n Amazon Web Services account ID does not match the actual owner of the Amazon S3\n bucket, the call fails with a permissions error.
This is a client-side setting. If workgroup settings override client-side settings,\n then the query uses the ExpectedBucketOwner
setting that is specified for\n the workgroup, and also uses the location for storing query results specified in the\n workgroup. See WorkGroupConfiguration$EnforceWorkGroupConfiguration\n and Workgroup Settings Override Client-Side Settings.
If set to \"true\", indicates that the previously-specified query results location (also\n known as a client-side setting) for queries in this workgroup should be ignored and set\n to null. If set to \"false\" or not set, and a value is present in the\n OutputLocation
in ResultConfigurationUpdates
(the\n client-side setting), the OutputLocation
in the workgroup's\n ResultConfiguration
will be updated with the new value. For more\n information, see Workgroup Settings Override\n Client-Side Settings.
If set to \"true\", indicates that the previously-specified query results location (also\n known as a client-side setting) for queries in this workgroup should be ignored and set\n to null. If set to \"false\" or not set, and a value is present in the\n OutputLocation
in ResultConfigurationUpdates
(the\n client-side setting), the OutputLocation
in the workgroup's\n ResultConfiguration
is updated with the new value. For more\n information, see Workgroup Settings Override\n Client-Side Settings.
If set to \"true\", indicates that the previously-specified encryption configuration\n (also known as the client-side setting) for queries in this workgroup should be ignored\n and set to null. If set to \"false\" or not set, and a value is present in the\n EncryptionConfiguration
in ResultConfigurationUpdates
(the\n client-side setting), the EncryptionConfiguration
in the workgroup's\n ResultConfiguration
will be updated with the new value. For more\n information, see Workgroup Settings Override\n Client-Side Settings.
If set to \"true\", indicates that the previously-specified encryption configuration\n (also known as the client-side setting) for queries in this workgroup should be ignored\n and set to null. If set to \"false\" or not set, and a value is present in the\n EncryptionConfiguration
in ResultConfigurationUpdates
(the\n client-side setting), the EncryptionConfiguration
in the workgroup's\n ResultConfiguration
is updated with the new value. For more\n information, see Workgroup Settings Override\n Client-Side Settings.
The Amazon Web Services account ID that you expect to be the owner of the Amazon S3 bucket specified by ResultConfiguration$OutputLocation.\n If set, Athena uses the value for ExpectedBucketOwner
when it\n makes Amazon S3 calls to your specified output location. If the\n ExpectedBucketOwner
\n Amazon Web Services account ID does not match the actual owner of the Amazon S3\n bucket, the call fails with a permissions error.
If workgroup settings override client-side settings, then the query uses the\n ExpectedBucketOwner
setting that is specified for the workgroup, and\n also uses the location for storing query results specified in the workgroup. See WorkGroupConfiguration$EnforceWorkGroupConfiguration and Workgroup Settings Override Client-Side Settings.
If set to \"true\", removes the Amazon Web Services account ID previously specified for\n ResultConfiguration$ExpectedBucketOwner. If set to \"false\" or not\n set, and a value is present in the ExpectedBucketOwner
in\n ResultConfigurationUpdates
(the client-side setting), the\n ExpectedBucketOwner
in the workgroup's ResultConfiguration
\n is updated with the new value. For more information, see Workgroup Settings Override\n Client-Side Settings.
The purpose of the data you've provided in the augmented manifest. You can either train or test this data. If you don't specify, the default is train.
\nTRAIN - all of the documents in the manifest will be used for training. If no test documents are provided, Amazon Comprehend will automatically reserve a portion of the training documents for testing.
\nTEST - all of the documents in the manifest will be used for testing.
" + "smithy.api#documentation": "The purpose of the data you've provided in the augmented manifest. You can either train or\n test this data. If you don't specify, the default is train.
\nTRAIN - all of the documents in the manifest will be used for training. If no test\n documents are provided, Amazon Comprehend will automatically reserve a portion of the training\n documents for testing.
\nTEST - all of the documents in the manifest will be used for testing.
" } }, "AttributeNames": { @@ -89,19 +89,19 @@ "AnnotationDataS3Uri": { "target": "com.amazonaws.comprehend#S3Uri", "traits": { - "smithy.api#documentation": "The S3 prefix to the annotation files that are referred in the augmented manifest file.
" + "smithy.api#documentation": "The S3 prefix to the annotation files that are referred in the augmented manifest\n file.
" } }, "SourceDocumentsS3Uri": { "target": "com.amazonaws.comprehend#S3Uri", "traits": { - "smithy.api#documentation": "The S3 prefix to the source files (PDFs) that are referred to in the augmented manifest file.
" + "smithy.api#documentation": "The S3 prefix to the source files (PDFs) that are referred to in the augmented manifest\n file.
" } }, "DocumentType": { "target": "com.amazonaws.comprehend#AugmentedManifestsDocumentTypeFormat", "traits": { - "smithy.api#documentation": "The type of augmented manifest. PlainTextDocument or SemiStructuredDocument. If you don't specify, the default is PlainTextDocument.
\n\n PLAIN_TEXT_DOCUMENT
A document type that represents any unicode text that is encoded in UTF-8.
\n SEMI_STRUCTURED_DOCUMENT
A document type with positional and structural context, like a PDF. For training with Amazon Comprehend, only PDFs are supported. \n For inference, Amazon Comprehend support PDFs, DOCX and TXT.
The type of augmented manifest. PlainTextDocument or SemiStructuredDocument. If you don't\n specify, the default is PlainTextDocument.
\n\n PLAIN_TEXT_DOCUMENT
A document type that represents any unicode text that\n is encoded in UTF-8.
\n SEMI_STRUCTURED_DOCUMENT
A document type with positional and structural\n context, like a PDF. For training with Amazon Comprehend, only PDFs are supported. For\n inference, Amazon Comprehend support PDFs, DOCX and TXT.
The result of calling the operation. The operation\n returns one object that is successfully processed by the operation.
" + "smithy.api#documentation": "The result of calling the operation. The\n operation returns one object that is successfully processed by the operation.
" } }, "com.amazonaws.comprehend#BatchDetectSyntaxRequest": { @@ -539,7 +539,7 @@ "ResultList": { "target": "com.amazonaws.comprehend#ListOfDetectSyntaxResult", "traits": { - "smithy.api#documentation": "A list of objects containing the results\n of the operation. The results are sorted in ascending order by the Index
field\n and match the order of the documents in the input list. If all of the documents contain an\n error, the ResultList
is empty.
A list of objects containing the\n results of the operation. The results are sorted in ascending order by the Index
\n field and match the order of the documents in the input list. If all of the documents contain\n an error, the ResultList
is empty.
Amazon Comprehend is an AWS service for gaining insight into the content of documents.\n Use these actions to determine the topics contained in your documents, the topics they\n discuss, the predominant sentiment expressed in them, the predominant language used, and\n more.
", + "smithy.api#title": "Amazon Comprehend" + }, "version": "2017-11-27", "operations": [ { @@ -857,6 +872,9 @@ { "target": "com.amazonaws.comprehend#DeleteEntityRecognizer" }, + { + "target": "com.amazonaws.comprehend#DeleteResourcePolicy" + }, { "target": "com.amazonaws.comprehend#DescribeDocumentClassificationJob" }, @@ -884,6 +902,9 @@ { "target": "com.amazonaws.comprehend#DescribePiiEntitiesDetectionJob" }, + { + "target": "com.amazonaws.comprehend#DescribeResourcePolicy" + }, { "target": "com.amazonaws.comprehend#DescribeSentimentDetectionJob" }, @@ -908,6 +929,9 @@ { "target": "com.amazonaws.comprehend#DetectSyntax" }, + { + "target": "com.amazonaws.comprehend#ImportModel" + }, { "target": "com.amazonaws.comprehend#ListDocumentClassificationJobs" }, @@ -950,6 +974,9 @@ { "target": "com.amazonaws.comprehend#ListTopicsDetectionJobs" }, + { + "target": "com.amazonaws.comprehend#PutResourcePolicy" + }, { "target": "com.amazonaws.comprehend#StartDocumentClassificationJob" }, @@ -1007,22 +1034,7 @@ { "target": "com.amazonaws.comprehend#UpdateEndpoint" } - ], - "traits": { - "aws.api#service": { - "sdkId": "Comprehend", - "arnNamespace": "comprehend", - "cloudFormationName": "Comprehend", - "cloudTrailEventSource": "comprehend.amazonaws.com", - "endpointPrefix": "comprehend" - }, - "aws.auth#sigv4": { - "name": "comprehend" - }, - "aws.protocols#awsJson1_1": {}, - "smithy.api#documentation": "Amazon Comprehend is an AWS service for gaining insight into the content of documents.\n Use these actions to determine the topics contained in your documents, the topics they\n discuss, the predominant sentiment expressed in them, the predominant language used, and\n more.
", - "smithy.api#title": "Amazon Comprehend" - } + ] }, "com.amazonaws.comprehend#ConcurrentModificationException": { "type": "structure", @@ -1144,7 +1156,7 @@ "VersionName": { "target": "com.amazonaws.comprehend#VersionName", "traits": { - "smithy.api#documentation": "The version name given to the newly created classifier. \n Version names can have a maximum of 256 characters. Alphanumeric characters, hyphens (-) and underscores (_) are allowed. \n The version name must be unique among all models with the same classifier name in the account/AWS Region.
" + "smithy.api#documentation": "The version name given to the newly created classifier. Version names can have a maximum\n of 256 characters. Alphanumeric characters, hyphens (-) and underscores (_) are allowed. The\n version name must be unique among all models with the same classifier name in the account/AWS\n Region.
" } }, "DataAccessRoleArn": { @@ -1210,6 +1222,12 @@ "traits": { "smithy.api#documentation": "ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt\n trained custom models. The ModelKmsKeyId can be either of the following formats:
\nKMS Key ID: \"1234abcd-12ab-34cd-56ef-1234567890ab\"
\n
Amazon Resource Name (ARN) of a KMS Key:\n \"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\"
\n
The resource-based policy to attach to your custom document classifier model. You can use\n this policy to allow another AWS account to import your custom model.
\nProvide your policy as a JSON body that you enter as a UTF-8 encoded string without line\n breaks. To provide valid JSON, enclose the attribute names and values in double quotes. If the\n JSON body is also enclosed in double quotes, then you must escape the double quotes that are\n inside the policy:
\n\n \"{\\\"attribute\\\": \\\"value\\\", \\\"attribute\\\": [\\\"value\\\"]}\"
\n
To avoid escaping quotes, you can use single quotes to enclose the policy and double\n quotes to enclose the JSON names and values:
\n\n '{\"attribute\": \"value\", \"attribute\": [\"value\"]}'
\n
Creates a model-specific endpoint for synchronous inference for a previously trained\n custom model\n
" + "smithy.api#documentation": "Creates a model-specific endpoint for synchronous inference for a previously trained\n custom model
" } }, "com.amazonaws.comprehend#CreateEndpointRequest": { @@ -1369,7 +1387,7 @@ "VersionName": { "target": "com.amazonaws.comprehend#VersionName", "traits": { - "smithy.api#documentation": "The version name given to the newly created recognizer. \n Version names can be a maximum of 256 characters. Alphanumeric characters, hyphens (-) and underscores (_) are allowed. \n The version name must be unique among all models with the same recognizer name in the account/ AWS Region.
" + "smithy.api#documentation": "The version name given to the newly created recognizer. Version names can be a maximum of\n 256 characters. Alphanumeric characters, hyphens (-) and underscores (_) are allowed. The\n version name must be unique among all models with the same recognizer name in the account/ AWS\n Region.
" } }, "DataAccessRoleArn": { @@ -1423,6 +1441,12 @@ "traits": { "smithy.api#documentation": "ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt\n trained custom models. The ModelKmsKeyId can be either of the following formats
\nKMS Key ID: \"1234abcd-12ab-34cd-56ef-1234567890ab\"
\n
Amazon Resource Name (ARN) of a KMS Key:\n \"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\"
\n
The JSON resource-based policy to attach to your custom entity recognizer model. You can\n use this policy to allow another AWS account to import your custom model.
\nProvide your JSON as a UTF-8 encoded string without line breaks. To provide valid JSON for\n your policy, enclose the attribute names and values in double quotes. If the JSON body is also\n enclosed in double quotes, then you must escape the double quotes that are inside the\n policy:
\n\n \"{\\\"attribute\\\": \\\"value\\\", \\\"attribute\\\": [\\\"value\\\"]}\"
\n
To avoid escaping quotes, you can use single quotes to enclose the policy and double\n quotes to enclose the JSON names and values:
\n\n '{\"attribute\": \"value\", \"attribute\": [\"value\"]}'
\n
Deletes a resource-based policy that is attached to a custom model.
" + } + }, + "com.amazonaws.comprehend#DeleteResourcePolicyRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "target": "com.amazonaws.comprehend#ComprehendModelArn", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the custom model version that has the policy to delete.
", + "smithy.api#required": {} + } + }, + "PolicyRevisionId": { + "target": "com.amazonaws.comprehend#PolicyRevisionId", + "traits": { + "smithy.api#documentation": "The revision ID of the policy to delete.
" + } + } + } + }, + "com.amazonaws.comprehend#DeleteResourcePolicyResponse": { + "type": "structure", + "members": {} + }, "com.amazonaws.comprehend#DescribeDocumentClassificationJob": { "type": "operation", "input": { @@ -2034,6 +2103,70 @@ } } }, + "com.amazonaws.comprehend#DescribeResourcePolicy": { + "type": "operation", + "input": { + "target": "com.amazonaws.comprehend#DescribeResourcePolicyRequest" + }, + "output": { + "target": "com.amazonaws.comprehend#DescribeResourcePolicyResponse" + }, + "errors": [ + { + "target": "com.amazonaws.comprehend#InternalServerException" + }, + { + "target": "com.amazonaws.comprehend#InvalidRequestException" + }, + { + "target": "com.amazonaws.comprehend#ResourceNotFoundException" + } + ], + "traits": { + "smithy.api#documentation": "Gets the details of a resource-based policy that is attached to a custom model, including\n the JSON body of the policy.
" + } + }, + "com.amazonaws.comprehend#DescribeResourcePolicyRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "target": "com.amazonaws.comprehend#ComprehendModelArn", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the policy to describe.
", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.comprehend#DescribeResourcePolicyResponse": { + "type": "structure", + "members": { + "ResourcePolicy": { + "target": "com.amazonaws.comprehend#Policy", + "traits": { + "smithy.api#documentation": "The JSON body of the resource-based policy.
" + } + }, + "CreationTime": { + "target": "com.amazonaws.comprehend#Timestamp", + "traits": { + "smithy.api#documentation": "The time at which the policy was created.
" + } + }, + "LastModifiedTime": { + "target": "com.amazonaws.comprehend#Timestamp", + "traits": { + "smithy.api#documentation": "The time at which the policy was last modified.
" + } + }, + "PolicyRevisionId": { + "target": "com.amazonaws.comprehend#PolicyRevisionId", + "traits": { + "smithy.api#documentation": "The revision ID of the policy. Each time you modify a policy, Amazon Comprehend assigns a\n new revision ID, and it deletes the prior version of the policy.
" + } + } + } + }, "com.amazonaws.comprehend#DescribeSentimentDetectionJob": { "type": "operation", "input": { @@ -2722,7 +2855,7 @@ "TestS3Uri": { "target": "com.amazonaws.comprehend#S3Uri", "traits": { - "smithy.api#documentation": "The Amazon S3 URI for the input data. \n The Amazon S3 bucket must be in the same AWS Region as the API endpoint that you are calling.\n The URI can point to a single input file or it can provide the prefix for a collection of input files.
" + "smithy.api#documentation": "The Amazon S3 URI for the input data. The Amazon S3 bucket must be in the same AWS Region\n as the API endpoint that you are calling. The URI can point to a single input file or it can\n provide the prefix for a collection of input files.
" } }, "LabelDelimiter": { @@ -2881,6 +3014,12 @@ "traits": { "smithy.api#documentation": "The version name that you assigned to the document classifier.
" } + }, + "SourceModelArn": { + "target": "com.amazonaws.comprehend#DocumentClassifierArn", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the source model. This model was imported from a\n different AWS account to create the document classifier model in your AWS account.
" + } } }, "traits": { @@ -2975,7 +3114,7 @@ "com.amazonaws.comprehend#DocumentReadFeatureTypes": { "type": "string", "traits": { - "smithy.api#documentation": "A list of the types of analyses to perform. This field specifies what feature types need to be extracted from the document where entity recognition is \n expected.
\n \n\n TABLES
- Add TABLES to the list to return information about the tables\n that are detected in the input document.
\n FORMS
- Add FORMS to return detected form data.
A list of the types of analyses to perform. This field specifies what feature types\n need to be extracted from the document where entity recognition is expected.
\n\n\n TABLES
- Add TABLES to the list to return information about the tables\n that are detected in the input document.
\n FORMS
- Add FORMS to return detected form data.
This enum field will start with two values which will apply to PDFs:
\n\n TEXTRACT_DETECT_DOCUMENT_TEXT
- The service calls DetectDocumentText for PDF documents per page.
\n TEXTRACT_ANALYZE_DOCUMENT
- The service calls AnalyzeDocument for PDF documents per page.
This enum field will start with two values which will apply to PDFs:
\n\n TEXTRACT_DETECT_DOCUMENT_TEXT
- The service calls DetectDocumentText\n for PDF documents per page.
\n TEXTRACT_ANALYZE_DOCUMENT
- The service calls AnalyzeDocument for PDF\n documents per page.
This enum field provides two values:
\n\n SERVICE_DEFAULT
- use service defaults for Document reading. For Digital PDF it would mean using an internal parser instead of Textract APIs
\n FORCE_DOCUMENT_READ_ACTION
- Always use specified action for DocumentReadAction, including Digital PDF.\n
This enum field provides two values:
\n\n SERVICE_DEFAULT
- use service defaults for Document reading. For\n Digital PDF it would mean using an internal parser instead of Textract APIs
\n FORCE_DOCUMENT_READ_ACTION
- Always use specified action for\n DocumentReadAction, including Digital PDF.
Provides information for filtering a list of dominant language detection jobs. For more\n information, see the operation.
" + "smithy.api#documentation": "Provides information for filtering a list of dominant language detection jobs. For more\n information, see the \n operation.
" } }, "com.amazonaws.comprehend#DominantLanguageDetectionJobProperties": { @@ -3236,7 +3375,7 @@ "DesiredModelArn": { "target": "com.amazonaws.comprehend#ComprehendModelArn", "traits": { - "smithy.api#documentation": "ARN of the new model to use for updating an existing endpoint. This ARN is going to be different from the model ARN when the update is in progress
" + "smithy.api#documentation": "ARN of the new model to use for updating an existing endpoint. This ARN is going to be\n different from the model ARN when the update is in progress
" } }, "DesiredInferenceUnits": { @@ -3272,7 +3411,7 @@ "DesiredDataAccessRoleArn": { "target": "com.amazonaws.comprehend#IamRoleArn", "traits": { - "smithy.api#documentation": "Data access role ARN to use in case the new model is encrypted with a customer KMS key.
" + "smithy.api#documentation": "Data access role ARN to use in case the new model is encrypted with a customer KMS\n key.
" } } }, @@ -3514,7 +3653,7 @@ "TestS3Uri": { "target": "com.amazonaws.comprehend#S3Uri", "traits": { - "smithy.api#documentation": "This specifies the Amazon S3 location where the test annotations for an entity recognizer are located. \n The URI must be in the same AWS Region as the API endpoint that you are calling.
" + "smithy.api#documentation": "This specifies the Amazon S3 location where the test annotations for an entity recognizer\n are located. The URI must be in the same AWS Region as the API endpoint that you are\n calling.
" } } }, @@ -3566,13 +3705,13 @@ "TestS3Uri": { "target": "com.amazonaws.comprehend#S3Uri", "traits": { - "smithy.api#documentation": "Specifies the Amazon S3 location where the test documents for an entity recognizer are located. \n The URI must be in the same AWS Region as the API endpoint that you are calling.
" + "smithy.api#documentation": "Specifies the Amazon S3 location where the test documents for an entity recognizer are\n located. The URI must be in the same AWS Region as the API endpoint that you are\n calling.
" } }, "InputFormat": { "target": "com.amazonaws.comprehend#InputFormat", "traits": { - "smithy.api#documentation": "Specifies how the text in an input file should be processed. This is optional, and the default is ONE_DOC_PER_LINE.\n \n ONE_DOC_PER_FILE - Each file is considered a separate document. Use this option when you are processing large documents, such as newspaper articles or scientific papers.\n \n ONE_DOC_PER_LINE - Each line in a file is considered a separate document. Use this option when you are processing many short documents, such as text messages.
" + "smithy.api#documentation": "Specifies how the text in an input file should be processed. This is optional, and the\n default is ONE_DOC_PER_LINE. ONE_DOC_PER_FILE - Each file is considered a separate document.\n Use this option when you are processing large documents, such as newspaper articles or\n scientific papers. ONE_DOC_PER_LINE - Each line in a file is considered a separate document.\n Use this option when you are processing many short documents, such as text messages.
" } } }, @@ -3865,6 +4004,12 @@ "traits": { "smithy.api#documentation": "The version name you assigned to the entity recognizer.
" } + }, + "SourceModelArn": { + "target": "com.amazonaws.comprehend#EntityRecognizerArn", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the source model. This model was imported from a\n different AWS account to create the entity recognizer model in your AWS account.
" + } } }, "traits": { @@ -3971,7 +4116,7 @@ "min": 0, "max": 64 }, - "smithy.api#pattern": "^(?:(?!\\\\n+|\\\\t+|\\\\r+|[\\r\\t\\n,]).)+$" + "smithy.api#pattern": "^(?![^\\n\\r\\t,]*\\\\n|\\\\r|\\\\t)[^\\n\\r\\t,]+$" } }, "com.amazonaws.comprehend#EntityTypesEvaluationMetrics": { @@ -4165,6 +4310,100 @@ "smithy.api#pattern": "^arn:aws(-[^:]+)?:iam::[0-9]{12}:role/.+$" } }, + "com.amazonaws.comprehend#ImportModel": { + "type": "operation", + "input": { + "target": "com.amazonaws.comprehend#ImportModelRequest" + }, + "output": { + "target": "com.amazonaws.comprehend#ImportModelResponse" + }, + "errors": [ + { + "target": "com.amazonaws.comprehend#InternalServerException" + }, + { + "target": "com.amazonaws.comprehend#InvalidRequestException" + }, + { + "target": "com.amazonaws.comprehend#KmsKeyValidationException" + }, + { + "target": "com.amazonaws.comprehend#ResourceInUseException" + }, + { + "target": "com.amazonaws.comprehend#ResourceLimitExceededException" + }, + { + "target": "com.amazonaws.comprehend#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.comprehend#ResourceUnavailableException" + }, + { + "target": "com.amazonaws.comprehend#TooManyRequestsException" + }, + { + "target": "com.amazonaws.comprehend#TooManyTagsException" + } + ], + "traits": { + "smithy.api#documentation": "Creates a new custom model that replicates a source custom model that you import. The\n source model can be in your AWS account or another one.
\nIf the source model is in another AWS account, then it must have a resource-based policy\n that authorizes you to import it.
\nThe source model must be in the same AWS region that you're using when you import. You\n can't import a model that's in a different region.
" + } + }, + "com.amazonaws.comprehend#ImportModelRequest": { + "type": "structure", + "members": { + "SourceModelArn": { + "target": "com.amazonaws.comprehend#ComprehendModelArn", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the custom model to import.
", + "smithy.api#required": {} + } + }, + "ModelName": { + "target": "com.amazonaws.comprehend#ComprehendArnName", + "traits": { + "smithy.api#documentation": "The name to assign to the custom model that is created in Amazon Comprehend by this\n import.
" + } + }, + "VersionName": { + "target": "com.amazonaws.comprehend#VersionName", + "traits": { + "smithy.api#documentation": "The version name given to the custom model that is created by this import. Version names\n can have a maximum of 256 characters. Alphanumeric characters, hyphens (-) and underscores (_)\n are allowed. The version name must be unique among all models with the same classifier name in\n the account/AWS Region.
" + } + }, + "ModelKmsKeyId": { + "target": "com.amazonaws.comprehend#KmsKeyId", + "traits": { + "smithy.api#documentation": "ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt\n trained custom models. The ModelKmsKeyId can be either of the following formats:
\nKMS Key ID: \"1234abcd-12ab-34cd-56ef-1234567890ab\"
\n
Amazon Resource Name (ARN) of a KMS Key:\n \"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\"
\n
The Amazon Resource Name (ARN) of the AWS Identity and Management (IAM) role that allows\n Amazon Comprehend to use Amazon Key Management Service (KMS) to encrypt or decrypt the custom\n model.
" + } + }, + "Tags": { + "target": "com.amazonaws.comprehend#TagList", + "traits": { + "smithy.api#documentation": "Tags to be associated with the custom model that is created by this import. A tag is a\n key-value pair that adds as a metadata to a resource used by Amazon Comprehend. For example, a\n tag with \"Sales\" as the key might be added to a resource to indicate its use by the sales\n department.
" + } + } + } + }, + "com.amazonaws.comprehend#ImportModelResponse": { + "type": "structure", + "members": { + "ModelArn": { + "target": "com.amazonaws.comprehend#ComprehendModelArn", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the custom model being imported.
" + } + } + } + }, "com.amazonaws.comprehend#InferenceUnitsInteger": { "type": "integer", "traits": { @@ -4193,7 +4432,7 @@ "DocumentReaderConfig": { "target": "com.amazonaws.comprehend#DocumentReaderConfig", "traits": { - "smithy.api#documentation": "The document reader config field applies only for InputDataConfig of StartEntitiesDetectionJob.
\nUse DocumentReaderConfig to provide specifications about how you want your inference documents read.\n Currently it applies for PDF documents in StartEntitiesDetectionJob custom inference.
" + "smithy.api#documentation": "The document reader config field applies only for InputDataConfig of\n StartEntitiesDetectionJob.
\nUse DocumentReaderConfig to provide specifications about how you want your inference\n documents read. Currently it applies for PDF documents in StartEntitiesDetectionJob custom\n inference.
" } } }, @@ -6100,6 +6339,85 @@ "smithy.api#documentation": "Provides configuration parameters for the output of PII entity detection jobs.
" } }, + "com.amazonaws.comprehend#Policy": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 20000 + }, + "smithy.api#pattern": "^[\\u0009\\u000A\\u000D\\u0020-\\u00FF]+$" + } + }, + "com.amazonaws.comprehend#PolicyRevisionId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 64 + }, + "smithy.api#pattern": "^[0-9A-Fa-f]+$" + } + }, + "com.amazonaws.comprehend#PutResourcePolicy": { + "type": "operation", + "input": { + "target": "com.amazonaws.comprehend#PutResourcePolicyRequest" + }, + "output": { + "target": "com.amazonaws.comprehend#PutResourcePolicyResponse" + }, + "errors": [ + { + "target": "com.amazonaws.comprehend#InternalServerException" + }, + { + "target": "com.amazonaws.comprehend#InvalidRequestException" + }, + { + "target": "com.amazonaws.comprehend#ResourceNotFoundException" + } + ], + "traits": { + "smithy.api#documentation": "Attaches a resource-based policy to a custom model. You can use this policy to authorize\n an entity in another AWS account to import the custom model, which replicates it in Amazon\n Comprehend in their account.
" + } + }, + "com.amazonaws.comprehend#PutResourcePolicyRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "target": "com.amazonaws.comprehend#ComprehendModelArn", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the custom model to attach the policy to.
", + "smithy.api#required": {} + } + }, + "ResourcePolicy": { + "target": "com.amazonaws.comprehend#Policy", + "traits": { + "smithy.api#documentation": "The JSON resource-based policy to attach to your custom model. Provide your JSON as a\n UTF-8 encoded string without line breaks. To provide valid JSON for your policy, enclose the\n attribute names and values in double quotes. If the JSON body is also enclosed in double\n quotes, then you must escape the double quotes that are inside the policy:
\n\n \"{\\\"attribute\\\": \\\"value\\\", \\\"attribute\\\": [\\\"value\\\"]}\"
\n
To avoid escaping quotes, you can use single quotes to enclose the policy and double\n quotes to enclose the JSON names and values:
\n\n '{\"attribute\": \"value\", \"attribute\": [\"value\"]}'
\n
The revision ID that Amazon Comprehend assigned to the policy that you are updating. If\n you are creating a new policy that has no prior version, don't use this parameter. Amazon\n Comprehend creates the revision ID for you.
" + } + } + } + }, + "com.amazonaws.comprehend#PutResourcePolicyResponse": { + "type": "structure", + "members": { + "PolicyRevisionId": { + "target": "com.amazonaws.comprehend#PolicyRevisionId", + "traits": { + "smithy.api#documentation": "The revision ID of the policy. Each time you modify a policy, Amazon Comprehend assigns a\n new revision ID, and it deletes the prior version of the policy.
" + } + } + } + }, "com.amazonaws.comprehend#RedactionConfig": { "type": "structure", "members": { diff --git a/codegen/sdk-codegen/aws-models/cost-explorer.json b/codegen/sdk-codegen/aws-models/cost-explorer.json index 5321d6f0def83..284808505d469 100644 --- a/codegen/sdk-codegen/aws-models/cost-explorer.json +++ b/codegen/sdk-codegen/aws-models/cost-explorer.json @@ -1620,6 +1620,10 @@ "value": "LEGAL_ENTITY_NAME", "name": "LEGAL_ENTITY_NAME" }, + { + "value": "INVOICING_ENTITY", + "name": "INVOICING_ENTITY" + }, { "value": "DEPLOYMENT_OPTION", "name": "DEPLOYMENT_OPTION" @@ -2502,7 +2506,7 @@ "Filter": { "target": "com.amazonaws.costexplorer#Expression", "traits": { - "smithy.api#documentation": "Filters Amazon Web Services costs by different dimensions. For example, you can specify SERVICE
and LINKED_ACCOUNT
\n\t\t\tand get the costs that are associated with that account's usage of that service. You can nest Expression
objects \n\t\t\tto define any combination of dimension filters. For more information, see \n\t\t\tExpression.
The default values are EQUALS
and CASE_SENSITIVE
. Valid values for MatchOptions
for Dimensions
are EQUALS
and CASE_SENSITIVE
.
Valid values for MatchOptions
for CostCategories
and Tags
are EQUALS
, ABSENT
, and CASE_SENSITIVE
.
Filters Amazon Web Services costs by different dimensions. For example, you can specify SERVICE
and LINKED_ACCOUNT
\n\t\t\tand get the costs that are associated with that account's usage of that service. You can nest Expression
objects \n\t\t\tto define any combination of dimension filters. For more information, see \n\t\t\tExpression.
Valid values for MatchOptions
for CostCategories
and Tags
are EQUALS
, ABSENT
, and CASE_SENSITIVE
.
The default values are EQUALS
and CASE_SENSITIVE
. Valid values for MatchOptions
for Dimensions
are EQUALS
and CASE_SENSITIVE
.
You can group Amazon Web Services costs using up to two different groups, either dimensions, tag keys,\n cost categories, or any two group by types.
\n\tValid values for the DIMENSION
type are AZ
, INSTANCE_TYPE
, LEGAL_ENTITY_NAME
, LINKED_ACCOUNT
, \n\t\t\tOPERATION
, PLATFORM
, PURCHASE_TYPE
, SERVICE
,\n\t\t TENANCY
, RECORD_TYPE
, and USAGE_TYPE
.
When you group by the TAG
type and include a valid tag key, you get all tag values, including empty strings.
You can group Amazon Web Services costs using up to two different groups, either dimensions, tag keys,\n cost categories, or any two group by types.
\n\tValid values for the DIMENSION
type are AZ
, INSTANCE_TYPE
, LEGAL_ENTITY_NAME
, INVOICING_ENTITY
, LINKED_ACCOUNT
, \n\t\t\tOPERATION
, PLATFORM
, PURCHASE_TYPE
, SERVICE
,\n\t\t TENANCY
, RECORD_TYPE
, and USAGE_TYPE
.
When you group by the TAG
type and include a valid tag key, you get all tag values, including empty strings.
Filters Amazon Web Services costs by different dimensions. For example, you can specify\n SERVICE
and LINKED_ACCOUNT
and get the costs that are associated\n with that account's usage of that service. You can nest Expression
objects to\n define any combination of dimension filters. For more information, see Expression.
The GetCostAndUsageWithResources
operation requires that you either group by or filter by a\n ResourceId
. It requires the Expression \n \"SERVICE = Amazon Elastic Compute Cloud - Compute\"
in the filter.
The default values are EQUALS
and CASE_SENSITIVE
. Valid values for MatchOptions
for Dimensions
are EQUALS
and CASE_SENSITIVE
.
Valid values for MatchOptions
for CostCategories
and Tags
are EQUALS
, ABSENT
, and CASE_SENSITIVE
.
Filters Amazon Web Services costs by different dimensions. For example, you can specify\n SERVICE
and LINKED_ACCOUNT
and get the costs that are associated\n with that account's usage of that service. You can nest Expression
objects to\n define any combination of dimension filters. For more information, see Expression.
The GetCostAndUsageWithResources
operation requires that you either group by or filter by a\n ResourceId
. It requires the Expression \n \"SERVICE = Amazon Elastic Compute Cloud - Compute\"
in the filter.
Valid values for MatchOptions
for CostCategories
and Tags
are EQUALS
, ABSENT
, and CASE_SENSITIVE
.
The default values are EQUALS
and CASE_SENSITIVE
. Valid values for MatchOptions
for Dimensions
are EQUALS
and CASE_SENSITIVE
.
The name of the dimension. Each Dimension
is available for a different Context
. \n\t\t For more information, see Context. \n\t\t\t\n\t\t
The name of the dimension. Each Dimension
is available for a different Context
. \n\t\t\tFor more information, see Context
. \n\t\t\t\n\t\t
The context for the call to GetDimensionValues
. This can be RESERVATIONS
or COST_AND_USAGE
. \n\t\t\tThe default value is COST_AND_USAGE
. If the context is set to RESERVATIONS
, the resulting dimension values \n\t\t\tcan be used in the GetReservationUtilization
operation. If the context is set to COST_AND_USAGE
, \n\t\t\tthe resulting dimension values can be used in the GetCostAndUsage
operation.
If you set the context to COST_AND_USAGE
, you can use the following \n dimensions for searching:
AZ - The Availability Zone. An example is us-east-1a
.
DATABASE_ENGINE - The Amazon Relational Database Service database. Examples are Aurora or MySQL.
\nINSTANCE_TYPE - The type of Amazon EC2 instance. An example is m4.xlarge
.
LEGAL_ENTITY_NAME - The name of the organization that sells you Amazon Web Services services, such as Amazon Web Services.
\nLINKED_ACCOUNT - The description in the attribute map that includes the full name of the member account. The value \n field contains the Amazon Web Services ID of the member account.
\nOPERATING_SYSTEM - The operating system. Examples are Windows or Linux.
\nOPERATION - The action performed. Examples include RunInstance
and CreateBucket
.
PLATFORM - The Amazon EC2 operating system. Examples are Windows or Linux.
\nPURCHASE_TYPE - The reservation type of the purchase to which this usage is related. Examples include On-Demand \n Instances and Standard Reserved Instances.
\nSERVICE - The Amazon Web Services service such as Amazon DynamoDB.
\nUSAGE_TYPE - The type of usage. An example is DataTransfer-In-Bytes. The response for the GetDimensionValues
operation\n includes a unit attribute. Examples include GB and Hrs.
USAGE_TYPE_GROUP - The grouping of common usage types. An example is Amazon EC2: CloudWatch – Alarms. The response for this \n operation includes a unit attribute.
\nREGION - The Amazon Web Services Region.
\nRECORD_TYPE - The different types of charges such as RI fees, usage costs, tax refunds, and credits.
\nRESOURCE_ID - The unique identifier of the resource. ResourceId is an opt-in feature only available for last 14 days for EC2-Compute Service.
\nIf you set the context to RESERVATIONS
, you can use the following \n dimensions for searching:
AZ - The Availability Zone. An example is us-east-1a
.
CACHE_ENGINE - The Amazon ElastiCache operating system. Examples are Windows or Linux.
\nDEPLOYMENT_OPTION - The scope of Amazon Relational Database Service deployments. Valid values are SingleAZ
and MultiAZ
.
INSTANCE_TYPE - The type of Amazon EC2 instance. An example is m4.xlarge
.
LINKED_ACCOUNT - The description in the attribute map that includes the full name of the member account. The value \n field contains the Amazon Web Services ID of the member account.
\nPLATFORM - The Amazon EC2 operating system. Examples are Windows or Linux.
\nREGION - The Amazon Web Services Region.
\nSCOPE (Utilization only) - The scope of a Reserved Instance (RI). Values are regional or a single Availability Zone.
\nTAG (Coverage only) - The tags that are associated with a Reserved Instance (RI).
\nTENANCY - The tenancy of a resource. Examples are shared or dedicated.
\nIf you set the context to SAVINGS_PLANS
, you can use the following dimensions for searching:
SAVINGS_PLANS_TYPE - Type of Savings Plans (EC2 Instance or Compute)
\nPAYMENT_OPTION - Payment option for the given Savings Plans (for example, All Upfront)
\nREGION - The Amazon Web Services Region.
\nINSTANCE_TYPE_FAMILY - The family of instances (For example, m5
)
LINKED_ACCOUNT - The description in the attribute map that includes the full name of the member account. The value \n field contains the Amazon Web Services ID of the member account.
\nSAVINGS_PLAN_ARN - The unique identifier for your Savings Plan
\nThe context for the call to GetDimensionValues
. This can be RESERVATIONS
or COST_AND_USAGE
. \n\t\t\tThe default value is COST_AND_USAGE
. If the context is set to RESERVATIONS
, the resulting dimension values \n\t\t\tcan be used in the GetReservationUtilization
operation. If the context is set to COST_AND_USAGE
, \n\t\t\tthe resulting dimension values can be used in the GetCostAndUsage
operation.
If you set the context to COST_AND_USAGE
, you can use the following \n dimensions for searching:
AZ - The Availability Zone. An example is us-east-1a
.
BILLING_ENTITY - The Amazon Web Services seller that your account is with. Possible values are the following:
\n- Amazon Web Services(Amazon Web Services): The entity that sells Amazon Web Services services.
\n- AISPL (Amazon Internet Services Pvt. Ltd.): The local Indian entity that is an acting reseller for Amazon Web Services services in India.
\n- Amazon Web Services Marketplace: The entity that supports the sale of solutions built on Amazon Web Services by third-party software providers.
\nCACHE_ENGINE - The Amazon ElastiCache operating system. Examples are Windows or Linux.
\nDEPLOYMENT_OPTION - The scope of Amazon Relational Database Service deployments. Valid values are SingleAZ
and MultiAZ
.
DATABASE_ENGINE - The Amazon Relational Database Service database. Examples are Aurora or MySQL.
\nINSTANCE_TYPE - The type of Amazon EC2 instance. An example is m4.xlarge
.
INSTANCE_TYPE_FAMILY - A family of instance types optimized to fit different use cases. Examples are Compute Optimized
(C4
, C5
, C6g
, C7g
etc.), Memory Optimized
(R4
, R5n
, R5b
, R6g
etc.).
INVOICING_ENTITY - The name of the entity issuing the Amazon Web Services invoice.
\nLEGAL_ENTITY_NAME - The name of the organization that sells you Amazon Web Services services, such as Amazon Web Services.
\nLINKED_ACCOUNT - The description in the attribute map that includes the full name of the member account. The value \n field contains the Amazon Web Services ID of the member account.
\nOPERATING_SYSTEM - The operating system. Examples are Windows or Linux.
\nOPERATION - The action performed. Examples include RunInstance
and CreateBucket
.
PLATFORM - The Amazon EC2 operating system. Examples are Windows or Linux.
\nPURCHASE_TYPE - The reservation type of the purchase to which this usage is related. Examples include On-Demand \n Instances and Standard Reserved Instances.
\nRESERVATION_ID - The unique identifier for an Amazon Web Services Reservation Instance.
\nSAVINGS_PLAN_ARN - The unique identifier for your Savings Plans.
\nSAVINGS_PLANS_TYPE - Type of Savings Plans (EC2 Instance or Compute).
\nSERVICE - The Amazon Web Services service such as Amazon DynamoDB.
\nTENANCY - The tenancy of a resource. Examples are shared or dedicated.
\nUSAGE_TYPE - The type of usage. An example is DataTransfer-In-Bytes. The response for the GetDimensionValues
operation\n includes a unit attribute. Examples include GB and Hrs.
USAGE_TYPE_GROUP - The grouping of common usage types. An example is Amazon EC2: CloudWatch – Alarms. The response for this \n operation includes a unit attribute.
\nREGION - The Amazon Web Services Region.
\nRECORD_TYPE - The different types of charges such as RI fees, usage costs, tax refunds, and credits.
\nRESOURCE_ID - The unique identifier of the resource. ResourceId is an opt-in feature only available for last 14 days for EC2-Compute Service.
\nIf you set the context to RESERVATIONS
, you can use the following \n dimensions for searching:
AZ - The Availability Zone. An example is us-east-1a
.
CACHE_ENGINE - The Amazon ElastiCache operating system. Examples are Windows or Linux.
\nDEPLOYMENT_OPTION - The scope of Amazon Relational Database Service deployments. Valid values are SingleAZ
and MultiAZ
.
INSTANCE_TYPE - The type of Amazon EC2 instance. An example is m4.xlarge
.
LINKED_ACCOUNT - The description in the attribute map that includes the full name of the member account. The value \n field contains the Amazon Web Services ID of the member account.
\nPLATFORM - The Amazon EC2 operating system. Examples are Windows or Linux.
\nREGION - The Amazon Web Services Region.
\nSCOPE (Utilization only) - The scope of a Reserved Instance (RI). Values are regional or a single Availability Zone.
\nTAG (Coverage only) - The tags that are associated with a Reserved Instance (RI).
\nTENANCY - The tenancy of a resource. Examples are shared or dedicated.
\nIf you set the context to SAVINGS_PLANS
, you can use the following dimensions for searching:
SAVINGS_PLANS_TYPE - Type of Savings Plans (EC2 Instance or Compute)
\nPAYMENT_OPTION - Payment option for the given Savings Plans (for example, All Upfront)
\nREGION - The Amazon Web Services Region.
\nINSTANCE_TYPE_FAMILY - The family of instances (For example, m5
)
LINKED_ACCOUNT - The description in the attribute map that includes the full name of the member account. The value \n field contains the Amazon Web Services ID of the member account.
\nSAVINGS_PLAN_ARN - The unique identifier for your Savings Plans.
\nYou can group the data by the following attributes:
\n\t\tAZ
\nCACHE_ENGINE
\nDATABASE_ENGINE
\nDEPLOYMENT_OPTION
\nINSTANCE_TYPE
\nLINKED_ACCOUNT
\nOPERATING_SYSTEM
\nPLATFORM
\nREGION
\nTENANCY
\nYou can group the data by the following attributes:
\n\t\tAZ
\nCACHE_ENGINE
\nDATABASE_ENGINE
\nDEPLOYMENT_OPTION
\nINSTANCE_TYPE
\nINVOICING_ENTITY
\nLINKED_ACCOUNT
\nOPERATING_SYSTEM
\nPLATFORM
\nREGION
\nTENANCY
\nFilters utilization data by dimensions. You can filter by the following dimensions:
\n\t\tAZ
\nCACHE_ENGINE
\nDATABASE_ENGINE
\nDEPLOYMENT_OPTION
\nINSTANCE_TYPE
\nLINKED_ACCOUNT
\nOPERATING_SYSTEM
\nPLATFORM
\nREGION
\nSERVICE
\nTAG
\nTENANCY
\n\n GetReservationCoverage
uses the same \n\t\t\tExpression object \n\t\t\tas the other operations, but only AND
is supported among each dimension. You can nest only one level deep.
If you don't provide a SERVICE
filter, Cost Explorer defaults to EC2.
Cost category is also supported.
" + "smithy.api#documentation": "Filters utilization data by dimensions. You can filter by the following dimensions:
\n\t\tAZ
\nCACHE_ENGINE
\nDATABASE_ENGINE
\nDEPLOYMENT_OPTION
\nINSTANCE_TYPE
\nLINKED_ACCOUNT
\nOPERATING_SYSTEM
\nPLATFORM
\nREGION
\nSERVICE
\nTAG
\nTENANCY
\n\n GetReservationCoverage
uses the same \n\t\t\tExpression object \n\t\t\tas the other operations, but only AND
is supported among each dimension. You can nest only one level deep. \n\t\t\tIf there are multiple values for a dimension, they are OR'd together.
If you don't provide a SERVICE
filter, Cost Explorer defaults to EC2.
Cost category is also supported.
" } }, "Metrics": { @@ -3214,7 +3218,7 @@ "Filter": { "target": "com.amazonaws.costexplorer#Expression", "traits": { - "smithy.api#documentation": "Filters utilization data by dimensions. You can filter by the following dimensions:
\n\t\tAZ
\nCACHE_ENGINE
\nDEPLOYMENT_OPTION
\nINSTANCE_TYPE
\nLINKED_ACCOUNT
\nOPERATING_SYSTEM
\nPLATFORM
\nREGION
\nSERVICE
\nSCOPE
\nTENANCY
\n\n GetReservationUtilization
uses the same \n\t\t\tExpression object \n\t\t\tas the other operations, but only AND
is supported among each dimension, and nesting is supported up to \n\t\t\tonly one level deep.
Filters utilization data by dimensions. You can filter by the following dimensions:
\n\t\tAZ
\nCACHE_ENGINE
\nDEPLOYMENT_OPTION
\nINSTANCE_TYPE
\nLINKED_ACCOUNT
\nOPERATING_SYSTEM
\nPLATFORM
\nREGION
\nSERVICE
\nSCOPE
\nTENANCY
\n\n GetReservationUtilization
uses the same \n\t\t\tExpression object \n\t\t\tas the other operations, but only AND
is supported among each dimension, and nesting is supported up to \n\t\t\tonly one level deep. If there are multiple values for a dimension, they are OR'd together.
Retrieves the Savings Plans covered for your account. This enables you to see how much of your cost is covered by a Savings Plan. An organization’s management account can see the coverage of the associated member accounts. This supports dimensions, Cost Categories, and nested expressions. For any time period, you can filter data for Savings Plans usage with the following dimensions:
\n\t\n LINKED_ACCOUNT
\n
\n REGION
\n
\n SERVICE
\n
\n INSTANCE_FAMILY
\n
\n GetSavingsPlansCoverage
doesn't support filtering by tags. GetSavingsPlansCoverage
also doesn't support the OR
operator between filter dimensions. For the full request syntax with supported parameters, see Examples.
To determine valid values for a dimension, use the GetDimensionValues
operation.
Retrieves the Savings Plans covered for your account. This enables you to see how much of your cost is covered by a Savings Plan. An organization’s management account can see the coverage of the associated member accounts. This supports dimensions, Cost Categories, and nested expressions. For any time period, you can filter data for Savings Plans usage with the following dimensions:
\n\t\n LINKED_ACCOUNT
\n
\n REGION
\n
\n SERVICE
\n
\n INSTANCE_FAMILY
\n
To determine valid values for a dimension, use the GetDimensionValues
operation.
Filters Savings Plans coverage data by dimensions. You can filter data for Savings Plans usage with the following dimensions:
\n\n LINKED_ACCOUNT
\n
\n REGION
\n
\n SERVICE
\n
\n INSTANCE_FAMILY
\n
\n GetSavingsPlansCoverage
uses the same \n Expression object \n as the other operations, but only AND
is supported among each dimension.
Cost category is supported. Tags are not supported.
" + "smithy.api#documentation": "Filters Savings Plans coverage data by dimensions. You can filter data for Savings Plans usage with the following dimensions:
\n\n LINKED_ACCOUNT
\n
\n REGION
\n
\n SERVICE
\n
\n INSTANCE_FAMILY
\n
\n GetSavingsPlansCoverage
uses the same \n Expression object \n as the other operations, but only AND
is supported among each dimension. If there are multiple values for a dimension, they are OR'd together.
Cost category is also supported.
" } }, "Metrics": { @@ -3568,7 +3572,7 @@ } ], "traits": { - "smithy.api#documentation": "Retrieves the Savings Plans utilization for your account across date ranges with daily or monthly granularity. Management account in an organization have access to member accounts. You can use GetDimensionValues
in SAVINGS_PLANS
to determine the possible dimension values.
You cannot group by any dimension values for GetSavingsPlansUtilization
.
\n GetSavingsPlansUtilization
doesn't support filtering by tags. GetSavingsPlansUtilization
also doesn't support the OR
operator between filter dimensions. For the full request syntax with supported parameters, see Examples.
Retrieves the Savings Plans utilization for your account across date ranges with daily or monthly granularity. Management account in an organization have access to member accounts. You can use GetDimensionValues
in SAVINGS_PLANS
to determine the possible dimension values.
You cannot group by any dimension values for GetSavingsPlansUtilization
.
Retrieves attribute data along with aggregate utilization and savings data for a given time period. This doesn't support granular or grouped data (daily/monthly) in response. You can't retrieve data by dates in a single response similar to GetSavingsPlansUtilization
, but you have the option to make multiple calls to GetSavingsPlansUtilizationDetails
by providing individual dates. You can use GetDimensionValues
in SAVINGS_PLANS
to determine the possible dimension values.
\n GetSavingsPlansUtilizationDetails
internally groups data by SavingsPlansArn
.
\n GetSavingsPlansUtilizationDetails
doesn't support filtering by tags. GetSavingsPlansUtilizationDetails
also doesn't support the OR
operator between filter dimensions. For the full request syntax with supported parameters, see Examples.
Retrieves attribute data along with aggregate utilization and savings data for a given time period. This doesn't support granular or grouped data (daily/monthly) in response. You can't retrieve data by dates in a single response similar to GetSavingsPlansUtilization
, but you have the option to make multiple calls to GetSavingsPlansUtilizationDetails
by providing individual dates. You can use GetDimensionValues
in SAVINGS_PLANS
to determine the possible dimension values.
\n GetSavingsPlansUtilizationDetails
internally groups data by SavingsPlansArn
.
Filters Savings Plans utilization coverage data for active Savings Plans dimensions. You can filter data with the following dimensions:
\n\n LINKED_ACCOUNT
\n
\n SAVINGS_PLAN_ARN
\n
\n REGION
\n
\n PAYMENT_OPTION
\n
\n INSTANCE_TYPE_FAMILY
\n
\n GetSavingsPlansUtilizationDetails
uses the same \n Expression object \n as the other operations, but only AND
is supported among each dimension.
Filtering by tags isn't supported.
" + "smithy.api#documentation": "Filters Savings Plans utilization coverage data for active Savings Plans dimensions. You can filter data with the following dimensions:
\n\n LINKED_ACCOUNT
\n
\n SAVINGS_PLAN_ARN
\n
\n REGION
\n
\n PAYMENT_OPTION
\n
\n INSTANCE_TYPE_FAMILY
\n
\n GetSavingsPlansUtilizationDetails
uses the same \n Expression object \n as the other operations, but only AND
is supported among each dimension.
Filters Savings Plans utilization coverage data for active Savings Plans dimensions. You can filter data with the following dimensions:
\n\n LINKED_ACCOUNT
\n
\n SAVINGS_PLAN_ARN
\n
\n SAVINGS_PLANS_TYPE
\n
\n REGION
\n
\n PAYMENT_OPTION
\n
\n INSTANCE_TYPE_FAMILY
\n
\n GetSavingsPlansUtilization
uses the same \n Expression object \n as the other operations, but only AND
is supported among each dimension.
Filtering by tags isn't supported.
" + "smithy.api#documentation": "Filters Savings Plans utilization coverage data for active Savings Plans dimensions. You can filter data with the following dimensions:
\n\n LINKED_ACCOUNT
\n
\n SAVINGS_PLAN_ARN
\n
\n SAVINGS_PLANS_TYPE
\n
\n REGION
\n
\n PAYMENT_OPTION
\n
\n INSTANCE_TYPE_FAMILY
\n
\n GetSavingsPlansUtilization
uses the same \n Expression object \n as the other operations, but only AND
is supported among each dimension.
The BatchWriteItem
operation puts or deletes multiple items in one or\n more tables. A single call to BatchWriteItem
can write up to 16 MB of data,\n which can comprise as many as 25 put or delete requests. Individual items to be written\n can be as large as 400 KB.
\n BatchWriteItem
cannot update items. To update items, use the\n UpdateItem
action.
The individual PutItem
and DeleteItem
operations specified\n in BatchWriteItem
are atomic; however BatchWriteItem
as a\n whole is not. If any requested operations fail because the table's provisioned\n throughput is exceeded or an internal processing failure occurs, the failed operations\n are returned in the UnprocessedItems
response parameter. You can\n investigate and optionally resend the requests. Typically, you would call\n BatchWriteItem
in a loop. Each iteration would check for unprocessed\n items and submit a new BatchWriteItem
request with those unprocessed items\n until all items have been processed.
If none of the items can be processed due to insufficient\n provisioned throughput on all of the tables in the request, then\n BatchWriteItem
returns a\n ProvisionedThroughputExceededException
.
If DynamoDB returns any unprocessed items, you should retry the batch operation on\n those items. However, we strongly recommend that you use an exponential\n backoff algorithm. If you retry the batch operation immediately, the\n underlying read or write requests can still fail due to throttling on the individual\n tables. If you delay the batch operation using exponential backoff, the individual\n requests in the batch are much more likely to succeed.
\nFor more information, see Batch Operations and Error Handling in the Amazon DynamoDB\n Developer Guide.
\nWith BatchWriteItem
, you can efficiently write or delete large amounts of\n data, such as from Amazon EMR, or copy data from another database into DynamoDB. In\n order to improve performance with these large-scale operations,\n BatchWriteItem
does not behave in the same way as individual\n PutItem
and DeleteItem
calls would. For example, you\n cannot specify conditions on individual put and delete requests, and\n BatchWriteItem
does not return deleted items in the response.
If you use a programming language that supports concurrency, you can use threads to\n write items in parallel. Your application must include the necessary logic to manage the\n threads. With languages that don't support threading, you must update or delete the\n specified items one at a time. In both situations, BatchWriteItem
performs\n the specified put and delete operations in parallel, giving you the power of the thread\n pool approach without having to introduce complexity into your application.
Parallel processing reduces latency, but each specified put and delete request\n consumes the same number of write capacity units whether it is processed in parallel or\n not. Delete operations on nonexistent items consume one write capacity unit.
\nIf one or more of the following is true, DynamoDB rejects the entire batch write\n operation:
\nOne or more tables specified in the BatchWriteItem
request does\n not exist.
Primary key attributes specified on an item in the request do not match those\n in the corresponding table's primary key schema.
\nYou try to perform multiple operations on the same item in the same\n BatchWriteItem
request. For example, you cannot put and delete\n the same item in the same BatchWriteItem
request.
Your request contains at least two items with identical hash and range keys\n (which essentially is two put operations).
\nThere are more than 25 requests in the batch.
\nAny individual item in a batch exceeds 400 KB.
\nThe total request size exceeds 16 MB.
\nThe BatchWriteItem
operation puts or deletes multiple items in one or\n more tables. A single call to BatchWriteItem
can transmit up to 16MB of \n data over the network, consisting of up to 25 item put or delete operations. While \n individual items can be up to 400 KB once stored, it's important to \n note that an item's representation might be greater than 400KB while being sent in \n DynamoDB's JSON format for the API call. For more details on this distinction, see Naming Rules and Data Types.
\n BatchWriteItem
cannot update items. To update items, use the\n UpdateItem
action.
The individual PutItem
and DeleteItem
operations specified\n in BatchWriteItem
are atomic; however BatchWriteItem
as a\n whole is not. If any requested operations fail because the table's provisioned\n throughput is exceeded or an internal processing failure occurs, the failed operations\n are returned in the UnprocessedItems
response parameter. You can\n investigate and optionally resend the requests. Typically, you would call\n BatchWriteItem
in a loop. Each iteration would check for unprocessed\n items and submit a new BatchWriteItem
request with those unprocessed items\n until all items have been processed.
If none of the items can be processed due to insufficient\n provisioned throughput on all of the tables in the request, then\n BatchWriteItem
returns a\n ProvisionedThroughputExceededException
.
If DynamoDB returns any unprocessed items, you should retry the batch operation on\n those items. However, we strongly recommend that you use an exponential\n backoff algorithm. If you retry the batch operation immediately, the\n underlying read or write requests can still fail due to throttling on the individual\n tables. If you delay the batch operation using exponential backoff, the individual\n requests in the batch are much more likely to succeed.
\nFor more information, see Batch Operations and Error Handling in the Amazon DynamoDB\n Developer Guide.
\nWith BatchWriteItem
, you can efficiently write or delete large amounts of\n data, such as from Amazon EMR, or copy data from another database into DynamoDB. In\n order to improve performance with these large-scale operations,\n BatchWriteItem
does not behave in the same way as individual\n PutItem
and DeleteItem
calls would. For example, you\n cannot specify conditions on individual put and delete requests, and\n BatchWriteItem
does not return deleted items in the response.
If you use a programming language that supports concurrency, you can use threads to\n write items in parallel. Your application must include the necessary logic to manage the\n threads. With languages that don't support threading, you must update or delete the\n specified items one at a time. In both situations, BatchWriteItem
performs\n the specified put and delete operations in parallel, giving you the power of the thread\n pool approach without having to introduce complexity into your application.
Parallel processing reduces latency, but each specified put and delete request\n consumes the same number of write capacity units whether it is processed in parallel or\n not. Delete operations on nonexistent items consume one write capacity unit.
\nIf one or more of the following is true, DynamoDB rejects the entire batch write\n operation:
\nOne or more tables specified in the BatchWriteItem
request does\n not exist.
Primary key attributes specified on an item in the request do not match those\n in the corresponding table's primary key schema.
\nYou try to perform multiple operations on the same item in the same\n BatchWriteItem
request. For example, you cannot put and delete\n the same item in the same BatchWriteItem
request.
Your request contains at least two items with identical hash and range keys\n (which essentially is two put operations).
\nThere are more than 25 requests in the batch.
\nAny individual item in a batch exceeds 400 KB.
\nThe total request size exceeds 16 MB.
\nThe current state of point in time recovery:
\n\n ENABLING
- Point in time recovery is being enabled.
\n ENABLED
- Point in time recovery is enabled.
\n DISABLED
- Point in time recovery is disabled.
The current state of point in time recovery:
\n\n ENABLED
- Point in time recovery is enabled.
\n DISABLED
- Point in time recovery is disabled.
Updates the status for contributor insights for a specific table or index. CloudWatch\n Contributor Insights for DynamoDB graphs display the partition key and (if applicable)\n sort key of frequently accessed items and frequently throttled items in plaintext. If\n you require the use of AWS Key Management Service (KMS) to encrypt this table’s\n partition key and sort key data with an AWS managed key or customer managed key, you\n should not enable CloudWatch Contributor Insights for DynamoDB for this table.
" + "smithy.api#documentation": "Updates the status for contributor insights for a specific table or index. CloudWatch\n Contributor Insights for DynamoDB graphs display the partition key and (if applicable)\n sort key of frequently accessed items and frequently throttled items in plaintext. If\n you require the use of Amazon Web Services Key Management Service (KMS) to encrypt this table’s\n partition key and sort key data with an Amazon Web Services managed key or customer managed key, you\n should not enable CloudWatch Contributor Insights for DynamoDB for this table.
" } }, "com.amazonaws.dynamodb#UpdateContributorInsightsInput": { diff --git a/codegen/sdk-codegen/aws-models/ec2.json b/codegen/sdk-codegen/aws-models/ec2.json index aa43116d61b71..11e5b7ec146e2 100644 --- a/codegen/sdk-codegen/aws-models/ec2.json +++ b/codegen/sdk-codegen/aws-models/ec2.json @@ -2808,6 +2808,9 @@ { "target": "com.amazonaws.ec2#ImportVolume" }, + { + "target": "com.amazonaws.ec2#ListImagesInRecycleBin" + }, { "target": "com.amazonaws.ec2#ListSnapshotsInRecycleBin" }, @@ -3090,6 +3093,9 @@ { "target": "com.amazonaws.ec2#RestoreAddressToClassic" }, + { + "target": "com.amazonaws.ec2#RestoreImageFromRecycleBin" + }, { "target": "com.amazonaws.ec2#RestoreManagedPrefixListVersion" }, @@ -8199,11 +8205,20 @@ "type": "structure", "members": { "Enabled": { - "target": "com.amazonaws.ec2#Boolean" + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#documentation": "Enable or disable a customizable text banner that will be displayed on\n\t\t\tAmazon Web Services provided clients when a VPN session is established.
\n\t\tValid values: true | false
\n
Default value: false
\n
Customizable text that will be displayed in a banner on Amazon Web Services provided\n\t\t\tclients when a VPN session is established. UTF-8 encoded characters only. Maximum of\n\t\t\t1400 characters.
" + } } + }, + "traits": { + "smithy.api#documentation": "Options for enabling a customizable text banner that will be displayed on\n\t\t\tAmazon Web Services provided clients when a VPN session is established.
" } }, "com.amazonaws.ec2#ClientLoginBannerResponseOptions": { @@ -8213,6 +8228,7 @@ "target": "com.amazonaws.ec2#Boolean", "traits": { "aws.protocols#ec2QueryName": "Enabled", + "smithy.api#documentation": "Current state of text banner feature.
\n\t\tValid values: true | false
\n
Customizable text that will be displayed in a banner on Amazon Web Services provided\n\t\t\tclients when a VPN session is established. UTF-8 encoded\n\t\t\tcharacters only. Maximum of 1400 characters.
", "smithy.api#xmlName": "bannerText" } } + }, + "traits": { + "smithy.api#documentation": "Current state of options for customizable text banner that will be displayed on\n\t\t\tAmazon Web Services provided clients when a VPN session is established.
" } }, "com.amazonaws.ec2#ClientVpnAssociationId": { @@ -8735,6 +8755,7 @@ "target": "com.amazonaws.ec2#Integer", "traits": { "aws.protocols#ec2QueryName": "SessionTimeoutHours", + "smithy.api#documentation": "The maximum VPN session duration time in hours.
\n\t\tValid values: 8 | 10 | 12 | 24
\n
Default value: 24
\n
Options for enabling a customizable text banner that will be displayed on Amazon Web Services provided clients when a VPN session is\n\t\t\testablished.
", "smithy.api#xmlName": "clientLoginBannerOptions" } } @@ -10306,10 +10328,16 @@ } }, "SessionTimeoutHours": { - "target": "com.amazonaws.ec2#Integer" + "target": "com.amazonaws.ec2#Integer", + "traits": { + "smithy.api#documentation": "The maximum VPN session duration time in hours.
\n\t\tValid values: 8 | 10 | 12 | 24
\n
Default value: 24
\n
Options for enabling a customizable text banner that will be displayed on\n\t\t\tAmazon Web Services provided clients when a VPN session is established.
" + } } } }, @@ -11158,7 +11186,7 @@ "target": "com.amazonaws.ec2#CreateImageResult" }, "traits": { - "smithy.api#documentation": "Creates an Amazon EBS-backed AMI from an Amazon EBS-backed instance \n \tthat is either running or stopped.
\n \n \n \n \t\n \tIf you customized your instance with instance store volumes or Amazon EBS volumes in addition to the root device volume, the \n \tnew AMI contains block device mapping information for those volumes. When you launch an instance from this new AMI, \n \tthe instance automatically launches with those additional volumes.
\n \tFor more information, see Creating Amazon EBS-Backed Linux AMIs \n\t\t\t\tin the Amazon Elastic Compute Cloud User Guide.
" + "smithy.api#documentation": "Creates an Amazon EBS-backed AMI from an Amazon EBS-backed instance \n \tthat is either running or stopped.
\nBy default, Amazon EC2 shuts down and reboots the instance before creating the AMI to ensure that everything on \n the instance is stopped and in a consistent state during the creation process. If you're confident that your \n instance is in a consistent state appropriate for AMI creation, use the NoReboot \n parameter to prevent Amazon EC2 from shutting down and rebooting the instance.
\nIf you customized your instance with instance store volumes or Amazon EBS volumes in addition to the root device volume, the \n \tnew AMI contains block device mapping information for those volumes. When you launch an instance from this new AMI, \n \tthe instance automatically launches with those additional volumes.
\n \tFor more information, see Creating Amazon EBS-Backed Linux AMIs \n\t\t\t\tin the Amazon Elastic Compute Cloud User Guide.
" } }, "com.amazonaws.ec2#CreateImageRequest": { @@ -18756,7 +18784,7 @@ "target": "com.amazonaws.ec2#DeregisterImageRequest" }, "traits": { - "smithy.api#documentation": "Deregisters the specified AMI. After you deregister an AMI, it can't be used to launch\n\t\t\tnew instances; however, it doesn't affect any instances that you've already launched\n\t\t\tfrom the AMI. You'll continue to incur usage costs for those instances until you\n\t\t\tterminate them.
\n \tWhen you deregister an Amazon EBS-backed AMI, it doesn't affect the snapshot that was\n\t\t\tcreated for the root volume of the instance during the AMI creation process. When you\n\t\t\tderegister an instance store-backed AMI, it doesn't affect the files that you uploaded\n\t\t\tto Amazon S3 when you created the AMI.
" + "smithy.api#documentation": "Deregisters the specified AMI. After you deregister an AMI, it can't be used to \n launch new instances.
\n \n \nIf you deregister an AMI that matches a Recycle Bin retention rule, the AMI is \n retained in the Recycle Bin for the specified retention period. For more information, \n see Recycle\n Bin in the Amazon Elastic Compute Cloud User Guide.
\n \nWhen you deregister an AMI, it doesn't affect any instances that you've already \n launched from the AMI. You'll continue to incur usage costs for those instances until \n you terminate them.
\n \tWhen you deregister an Amazon EBS-backed AMI, it doesn't affect the snapshot that was\n\t\t\tcreated for the root volume of the instance during the AMI creation process. When you\n\t\t\tderegister an instance store-backed AMI, it doesn't affect the files that you uploaded\n\t\t\tto Amazon S3 when you created the AMI.
" } }, "com.amazonaws.ec2#DeregisterImageRequest": { @@ -30875,7 +30903,7 @@ "target": "com.amazonaws.ec2#DisableFastLaunchResult" }, "traits": { - "smithy.api#documentation": "Discontinue faster launching for a Windows AMI, and clean up existing pre-provisioned snapshots. \n\t\t\tWhen you disable faster launching, the AMI uses the standard launch process for each \n\t\t\tinstance. All pre-provisioned snapshots must be removed before you can enable faster launching again.
" + "smithy.api#documentation": "Discontinue faster launching for a Windows AMI, and clean up existing pre-provisioned snapshots. \n\t\t\tWhen you disable faster launching, the AMI uses the standard launch process for each \n\t\t\tinstance. All pre-provisioned snapshots must be removed before you can enable faster launching again.
\n\t\tTo change these settings, you must own the AMI.
\n\t\tWhen you enable faster launching for a Windows AMI, images are pre-provisioned, \n\t\t\tusing snapshots to launch instances up to 65% faster. To create the optimized Windows \n\t\t\timage, Amazon EC2 launches an instance and runs through Sysprep steps, rebooting as required. \n\t\t\tThen it creates a set of reserved snapshots that are used for subsequent launches. The \n\t\t\treserved snapshots are automatically replenished as they are used, depending on your \n\t\t\tsettings for launch frequency.
" + "smithy.api#documentation": "When you enable faster launching for a Windows AMI, images are pre-provisioned, \n\t\t\tusing snapshots to launch instances up to 65% faster. To create the optimized Windows \n\t\t\timage, Amazon EC2 launches an instance and runs through Sysprep steps, rebooting as required. \n\t\t\tThen it creates a set of reserved snapshots that are used for subsequent launches. The \n\t\t\treserved snapshots are automatically replenished as they are used, depending on your \n\t\t\tsettings for launch frequency.
\n\t\tTo change these settings, you must own the AMI.
\n\t\tThe maximum number of parallel instances to launch for creating resources.
" + "smithy.api#documentation": "The maximum number of parallel instances to launch for creating resources. Value must be 6
or greater.
The ID of the AMI.
", + "smithy.api#xmlName": "imageId" + } + }, + "Name": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "Name", + "smithy.api#documentation": "The name of the AMI.
", + "smithy.api#xmlName": "name" + } + }, + "Description": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "Description", + "smithy.api#documentation": "The description of the AMI.
", + "smithy.api#xmlName": "description" + } + }, + "RecycleBinEnterTime": { + "target": "com.amazonaws.ec2#MillisecondDateTime", + "traits": { + "aws.protocols#ec2QueryName": "RecycleBinEnterTime", + "smithy.api#documentation": "The date and time when the AMI entered the Recycle Bin.
", + "smithy.api#xmlName": "recycleBinEnterTime" + } + }, + "RecycleBinExitTime": { + "target": "com.amazonaws.ec2#MillisecondDateTime", + "traits": { + "aws.protocols#ec2QueryName": "RecycleBinExitTime", + "smithy.api#documentation": "The date and time when the AMI is to be permanently deleted from the Recycle Bin.
", + "smithy.api#xmlName": "recycleBinExitTime" + } + } + }, + "traits": { + "smithy.api#documentation": "Information about an AMI that is currently in the Recycle Bin.
" + } + }, + "com.amazonaws.ec2#ImageRecycleBinInfoList": { + "type": "list", + "member": { + "target": "com.amazonaws.ec2#ImageRecycleBinInfo", + "traits": { + "smithy.api#xmlName": "item" + } + } + }, "com.amazonaws.ec2#ImageState": { "type": "string", "traits": { @@ -51387,6 +51472,85 @@ } } }, + "com.amazonaws.ec2#ListImagesInRecycleBin": { + "type": "operation", + "input": { + "target": "com.amazonaws.ec2#ListImagesInRecycleBinRequest" + }, + "output": { + "target": "com.amazonaws.ec2#ListImagesInRecycleBinResult" + }, + "traits": { + "smithy.api#documentation": "Lists one or more AMIs that are currently in the Recycle Bin. For more information, \n see Recycle\n Bin in the Amazon Elastic Compute Cloud User Guide.
", + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "Images", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.ec2#ListImagesInRecycleBinMaxResults": { + "type": "integer", + "traits": { + "smithy.api#box": {}, + "smithy.api#range": { + "min": 1, + "max": 1000 + } + } + }, + "com.amazonaws.ec2#ListImagesInRecycleBinRequest": { + "type": "structure", + "members": { + "ImageIds": { + "target": "com.amazonaws.ec2#ImageIdStringList", + "traits": { + "smithy.api#documentation": "The IDs of the AMIs to list. Omit this parameter to list all of the AMIs that \n are in the Recycle Bin. You can specify up to 20 IDs in a single request.
", + "smithy.api#xmlName": "ImageId" + } + }, + "NextToken": { + "target": "com.amazonaws.ec2#String", + "traits": { + "smithy.api#documentation": "The token for the next page of results.
" + } + }, + "MaxResults": { + "target": "com.amazonaws.ec2#ListImagesInRecycleBinMaxResults", + "traits": { + "smithy.api#documentation": "The maximum number of results to return with a single call.\n\tTo retrieve the remaining results, make another call with the returned nextToken
value.
If you do not specify a value for MaxResults, the request \n returns 1,000 items per page by default. For more information, see \n \n Pagination.
" + } + }, + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#documentation": "Checks whether you have the required permissions for the action, without actually making the request, \n\t\t\tand provides an error response. If you have the required permissions, the error response is \n\t\t\tDryRunOperation
. Otherwise, it is UnauthorizedOperation
.
Information about the AMIs.
", + "smithy.api#xmlName": "imageSet" + } + }, + "NextToken": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "NextToken", + "smithy.api#documentation": "The token to use to retrieve the next page of results. This value is null
when there are no more results to return.
The maximum VPN session duration time in hours.
\n\t\tValid values: 8 | 10 | 12 | 24
\n
Default value: 24
\n
Options for enabling a customizable text banner that will be displayed on\n\t\t\tAmazon Web Services provided clients when a VPN session is established.
" + } } } }, @@ -65570,6 +65740,49 @@ } } }, + "com.amazonaws.ec2#RestoreImageFromRecycleBin": { + "type": "operation", + "input": { + "target": "com.amazonaws.ec2#RestoreImageFromRecycleBinRequest" + }, + "output": { + "target": "com.amazonaws.ec2#RestoreImageFromRecycleBinResult" + }, + "traits": { + "smithy.api#documentation": "Restores an AMI from the Recycle Bin. For more information, see Recycle Bin in the Amazon Elastic Compute Cloud User Guide.
" + } + }, + "com.amazonaws.ec2#RestoreImageFromRecycleBinRequest": { + "type": "structure", + "members": { + "ImageId": { + "target": "com.amazonaws.ec2#ImageId", + "traits": { + "smithy.api#documentation": "The ID of the AMI to restore.
", + "smithy.api#required": {} + } + }, + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#documentation": "Checks whether you have the required permissions for the action, without actually making the request, \n\t\t\tand provides an error response. If you have the required permissions, the error response is \n\t\t\tDryRunOperation
. Otherwise, it is UnauthorizedOperation
.
Returns true
if the request succeeds; otherwise, it returns an error.
The name of the cache parameter group family that this cache parameter group is compatible with.
\nValid values are: \n memcached1.4
| \n memcached1.5
| \n memcached1.6
| \n redis2.6
| \n redis2.8
|\n redis3.2
|\n redis4.0
|\n redis5.0
| \n redis6.0
| \n
The name of the cache parameter group family that this cache parameter group is compatible with.
\nValid values are: \n memcached1.4
| \n memcached1.5
| \n memcached1.6
| \n redis2.6
| \n redis2.8
|\n redis3.2
|\n redis4.0
|\n redis5.0
| \n redis6.x
| \n
Specifies the weekly time range during which maintenance\n on the cluster is performed. It is specified as a range in\n the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum\n maintenance window is a 60 minute period.\n Valid values for ddd
are:
Specifies the weekly time range during which maintenance\n on the cluster is performed. It is specified as a range in\n the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum\n maintenance window is a 60 minute period.\n
" } }, "Port": { @@ -2262,7 +2262,7 @@ "CacheParameterGroupFamily": { "target": "com.amazonaws.elasticache#String", "traits": { - "smithy.api#documentation": "The name of the cache parameter group family that the cache parameter group can be used with.
\nValid values are: \n memcached1.4
| \n memcached1.5
| \n memcached1.6
| \n redis2.6
| \n redis2.8
|\n redis3.2
|\n redis4.0
|\n redis5.0
| \n redis6.0
| \n redis6.2
\n
The name of the cache parameter group family that the cache parameter group can be used with.
\nValid values are: \n memcached1.4
| \n memcached1.5
| \n memcached1.6
| \n redis2.6
| \n redis2.8
|\n redis3.2
|\n redis4.0
|\n redis5.0
| \n redis6.x
\n
The result of a CancelElasticsearchServiceSoftwareUpdate
operation. Contains the status of the update.
The unique change identifier associated with a specific domain configuration change.
" + } + }, + "Message": { + "target": "com.amazonaws.elasticsearchservice#Message", + "traits": { + "smithy.api#documentation": "Contains an optional message associated with the domain configuration change.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Specifies change details of the domain configuration change.
" + } + }, + "com.amazonaws.elasticsearchservice#ChangeProgressStage": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.elasticsearchservice#ChangeProgressStageName", + "traits": { + "smithy.api#documentation": "The name of the specific progress stage.
" + } + }, + "Status": { + "target": "com.amazonaws.elasticsearchservice#ChangeProgressStageStatus", + "traits": { + "smithy.api#documentation": "The overall status of a specific progress stage.
" + } + }, + "Description": { + "target": "com.amazonaws.elasticsearchservice#Description", + "traits": { + "smithy.api#documentation": "The description of the progress stage.
" + } + }, + "LastUpdated": { + "target": "com.amazonaws.elasticsearchservice#LastUpdated", + "traits": { + "smithy.api#documentation": "The last updated timestamp of the progress stage.
" + } + } + }, + "traits": { + "smithy.api#documentation": "A progress stage details of a specific domain configuration change.
" + } + }, + "com.amazonaws.elasticsearchservice#ChangeProgressStageList": { + "type": "list", + "member": { + "target": "com.amazonaws.elasticsearchservice#ChangeProgressStage" + }, + "traits": { + "smithy.api#documentation": "The list of progress stages of a specific domain configuration change.
" + } + }, + "com.amazonaws.elasticsearchservice#ChangeProgressStageName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 64 + } + } + }, + "com.amazonaws.elasticsearchservice#ChangeProgressStageStatus": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + } + } + }, + "com.amazonaws.elasticsearchservice#ChangeProgressStatusDetails": { + "type": "structure", + "members": { + "ChangeId": { + "target": "com.amazonaws.elasticsearchservice#GUID", + "traits": { + "smithy.api#documentation": "The unique change identifier associated with a specific domain configuration change.
" + } + }, + "StartTime": { + "target": "com.amazonaws.elasticsearchservice#UpdateTimestamp", + "traits": { + "smithy.api#documentation": "The time at which the configuration change is made on the domain.
" + } + }, + "Status": { + "target": "com.amazonaws.elasticsearchservice#OverallChangeStatus", + "traits": { + "smithy.api#documentation": "The overall status of the domain configuration change. This field can take the following values: PENDING
, PROCESSING
, COMPLETED
and FAILED
The list of properties involved in the domain configuration change that are still in pending.
" + } + }, + "CompletedProperties": { + "target": "com.amazonaws.elasticsearchservice#StringList", + "traits": { + "smithy.api#documentation": "The list of properties involved in the domain configuration change that are completed.
" + } + }, + "TotalNumberOfStages": { + "target": "com.amazonaws.elasticsearchservice#TotalNumberOfStages", + "traits": { + "smithy.api#documentation": "The total number of stages required for the configuration change.
" + } + }, + "ChangeProgressStages": { + "target": "com.amazonaws.elasticsearchservice#ChangeProgressStageList", + "traits": { + "smithy.api#documentation": "The specific stages that the domain is going through to perform the configuration change.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The progress details of a specific domain configuration change.
" + } + }, "com.amazonaws.elasticsearchservice#CloudWatchLogsLogGroupArn": { "type": "string", "traits": { @@ -1801,6 +1933,74 @@ "smithy.api#documentation": "The result of DescribeDomainAutoTunes
request. See the Developer Guide for more information.
Returns information about the current blue/green deployment happening on a domain, including\n a change ID, status, and progress stages.
", + "smithy.api#http": { + "method": "GET", + "uri": "/2015-01-01/es/domain/{DomainName}/progress", + "code": 200 + } + } + }, + "com.amazonaws.elasticsearchservice#DescribeDomainChangeProgressRequest": { + "type": "structure", + "members": { + "DomainName": { + "target": "com.amazonaws.elasticsearchservice#DomainName", + "traits": { + "smithy.api#documentation": "The domain you want to get the progress information about.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "ChangeId": { + "target": "com.amazonaws.elasticsearchservice#GUID", + "traits": { + "smithy.api#documentation": "The specific change ID for which you want to get progress information. This is an optional parameter.\n If omitted, the service returns information about the most recent configuration change.\n
", + "smithy.api#httpQuery": "changeid" + } + } + }, + "traits": { + "smithy.api#documentation": "Container for the parameters to the DescribeDomainChangeProgress
operation. Specifies the\n domain name and optional change specific identity for which you want progress information.\n
Progress information for the configuration change that is requested in the DescribeDomainChangeProgress
request.\n
The result of a DescribeDomainChangeProgress
request. Contains the progress information of\n the requested domain change.\n
Container for results from DescribeReservedElasticsearchInstances
Specifies AutoTuneOptions
for the domain.
Specifies change details of the domain configuration change.
" + } } }, "traits": { @@ -3557,6 +3766,12 @@ "traits": { "smithy.api#documentation": "The current status of the Elasticsearch domain's Auto-Tune options.
" } + }, + "ChangeProgressDetails": { + "target": "com.amazonaws.elasticsearchservice#ChangeProgressDetails", + "traits": { + "smithy.api#documentation": "Specifies change details of the domain configuration change.
" + } } }, "traits": { @@ -5116,6 +5331,30 @@ "target": "com.amazonaws.elasticsearchservice#OutboundCrossClusterSearchConnection" } }, + "com.amazonaws.elasticsearchservice#OverallChangeStatus": { + "type": "string", + "traits": { + "smithy.api#documentation": "The overall status value of the domain configuration change.
", + "smithy.api#enum": [ + { + "value": "PENDING", + "name": "PENDING" + }, + { + "value": "PROCESSING", + "name": "PROCESSING" + }, + { + "value": "COMPLETED", + "name": "COMPLETED" + }, + { + "value": "FAILED", + "name": "FAILED" + } + ] + } + }, "com.amazonaws.elasticsearchservice#OwnerId": { "type": "string", "traits": { @@ -6322,6 +6561,9 @@ ] } }, + "com.amazonaws.elasticsearchservice#TotalNumberOfStages": { + "type": "integer" + }, "com.amazonaws.elasticsearchservice#UIntValue": { "type": "integer", "traits": { @@ -6656,6 +6898,9 @@ "traits": { "smithy.api#documentation": "\n This flag, when set to True, indicates that an Upgrade Eligibility Check needs to be performed.\n This will not actually perform the Upgrade.\n
" } + }, + "ChangeProgressDetails": { + "target": "com.amazonaws.elasticsearchservice#ChangeProgressDetails" } }, "traits": { diff --git a/codegen/sdk-codegen/aws-models/emr.json b/codegen/sdk-codegen/aws-models/emr.json index a07711477b264..ce4db127c9994 100644 --- a/codegen/sdk-codegen/aws-models/emr.json +++ b/codegen/sdk-codegen/aws-models/emr.json @@ -193,7 +193,7 @@ } ], "traits": { - "smithy.api#documentation": "AddJobFlowSteps adds new steps to a running cluster. A maximum of 256 steps are allowed\n in each job flow.
\nIf your cluster is long-running (such as a Hive data warehouse) or complex, you may\n require more than 256 steps to process your data. You can bypass the 256-step limitation in\n various ways, including using SSH to connect to the master node and submitting queries\n directly to the software running on the master node, such as Hive and Hadoop. For more\n information on how to do this, see Add More than 256 Steps to a\n Cluster in the Amazon EMR Management Guide.
\nA step specifies the location of a JAR file stored either on the master node of the\n cluster or in Amazon S3. Each step is performed by the main function of the main class of\n the JAR file. The main class can be specified either in the manifest of the JAR or by using\n the MainFunction parameter of the step.
\nAmazon EMR executes each step in the order listed. For a step to be considered complete,\n the main function must exit with a zero exit code and all Hadoop jobs started while the\n step was running must have completed and run successfully.
\nYou can only add steps to a cluster that is in one of the following states: STARTING,\n BOOTSTRAPPING, RUNNING, or WAITING.
" + "smithy.api#documentation": "AddJobFlowSteps adds new steps to a running cluster. A maximum of 256 steps are allowed\n in each job flow.
\nIf your cluster is long-running (such as a Hive data warehouse) or complex, you may\n require more than 256 steps to process your data. You can bypass the 256-step limitation in\n various ways, including using SSH to connect to the master node and submitting queries\n directly to the software running on the master node, such as Hive and Hadoop. For more\n information on how to do this, see Add More than 256 Steps to a\n Cluster in the Amazon EMR Management Guide.
\nA step specifies the location of a JAR file stored either on the master node of the\n cluster or in Amazon S3. Each step is performed by the main function of the main class of\n the JAR file. The main class can be specified either in the manifest of the JAR or by using\n the MainFunction parameter of the step.
\nAmazon EMR executes each step in the order listed. For a step to be considered complete,\n the main function must exit with a zero exit code and all Hadoop jobs started while the\n step was running must have completed and run successfully.
\nYou can only add steps to a cluster that is in one of the following states: STARTING,\n BOOTSTRAPPING, RUNNING, or WAITING.
\nThe string values passed into HadoopJarStep
object cannot exceed a total of 10240 characters.
Indicates whether the cluster is visible to IAM principals in the Amazon Web Services account associated\n with the cluster. When true
, IAM principals in the\n Amazon Web Services account can perform EMR cluster actions on the cluster that their IAM policies allow. When false
, only the IAM principal that created the cluster and the Amazon Web Services account root user can perform EMR actions, regardless of IAM permissions policies attached to other IAM principals.
The default value is true
if a value is not provided when creating a\n cluster using the EMR API RunJobFlow command, the CLI\n create-cluster command, or the Amazon Web Services Management Console. IAM principals that are\n allowed to perform actions on the cluster can use the SetVisibleToAllUsers action to change the value on a running cluster. For more information, see Understanding the EMR Cluster VisibleToAllUsers Setting in the Amazon EMRManagement Guide.
Indicates whether the cluster is visible to IAM principals in the Amazon Web Services account associated\n with the cluster. When true
, IAM principals in the\n Amazon Web Services account can perform EMR cluster actions on the cluster that their IAM policies allow. When false
, only the IAM principal that created the cluster and the Amazon Web Services account root user can perform EMR actions, regardless of IAM permissions policies attached to other IAM principals.
The default value is true
if a value is not provided when creating a\n cluster using the EMR API RunJobFlow command, the CLI\n create-cluster command, or the Amazon Web Services Management Console.
Amazon EMR is a web service that makes it easier to process large amounts of data\n efficiently. Amazon EMR uses Hadoop processing combined with several Amazon Web Services services to do\n tasks such as web indexing, data mining, log file analysis, machine learning, scientific\n simulation, and data warehouse management.
", + "smithy.api#title": "Amazon EMR", + "smithy.api#xmlNamespace": { + "uri": "http://elasticmapreduce.amazonaws.com/doc/2009-03-31" + } + }, "version": "2009-03-31", "operations": [ { @@ -2544,25 +2562,7 @@ { "target": "com.amazonaws.emr#UpdateStudioSessionMapping" } - ], - "traits": { - "aws.api#service": { - "sdkId": "EMR", - "arnNamespace": "elasticmapreduce", - "cloudFormationName": "EMR", - "cloudTrailEventSource": "emr.amazonaws.com", - "endpointPrefix": "elasticmapreduce" - }, - "aws.auth#sigv4": { - "name": "elasticmapreduce" - }, - "aws.protocols#awsJson1_1": {}, - "smithy.api#documentation": "Amazon EMR is a web service that makes it easier to process large amounts of data\n efficiently. Amazon EMR uses Hadoop processing combined with several Amazon Web Services services to do\n tasks such as web indexing, data mining, log file analysis, machine learning, scientific\n simulation, and data warehouse management.
", - "smithy.api#title": "Amazon EMR", - "smithy.api#xmlNamespace": { - "uri": "http://elasticmapreduce.amazonaws.com/doc/2009-03-31" - } - } + ] }, "com.amazonaws.emr#ErrorCode": { "type": "string", @@ -3566,7 +3566,7 @@ "State": { "target": "com.amazonaws.emr#InstanceGroupState", "traits": { - "smithy.api#documentation": "State of instance group. The following values are deprecated: STARTING, TERMINATED, and\n FAILED.
", + "smithy.api#documentation": "State of instance group. The following values are no longer supported: STARTING, TERMINATED, and\n FAILED.
", "smithy.api#required": {} } }, @@ -4289,7 +4289,7 @@ "VisibleToAllUsers": { "target": "com.amazonaws.emr#Boolean", "traits": { - "smithy.api#documentation": "Indicates whether the cluster is visible to IAM principals in the Amazon Web Services account associated\n with the cluster. When true
, IAM principals in the\n Amazon Web Services account can perform EMR cluster actions that their IAM policies allow. When false
, only the IAM principal that created the cluster and the Amazon Web Services account root user can perform EMR actions, regardless of IAM permissions policies attached to other IAM principals.
The default value is true
if a value is not provided when creating a\n cluster using the EMR API RunJobFlow command, the CLI\n create-cluster command, or the Amazon Web Services Management Console. IAM principals that are authorized to perform actions on the cluster can use the SetVisibleToAllUsers action to change the value on a running cluster. For more information, see Understanding the EMR Cluster VisibleToAllUsers Setting in the Amazon EMRManagement Guide.
Indicates whether the cluster is visible to IAM principals in the Amazon Web Services account associated\n with the cluster. When true
, IAM principals in the\n Amazon Web Services account can perform EMR cluster actions that their IAM policies allow. When false
, only the IAM principal that created the cluster and the Amazon Web Services account root user can perform EMR actions, regardless of IAM permissions policies attached to other IAM principals.
The default value is true
if a value is not provided when creating a\n cluster using the EMR API RunJobFlow command, the CLI\n create-cluster command, or the Amazon Web Services Management Console.
Creates or updates an auto-termination policy for an Amazon EMR cluster. An auto-termination policy defines the amount of idle time in seconds after which a cluster automatically terminates. For alternative cluster termination options, see Control cluster termination.
" + "smithy.api#documentation": "Auto-termination is supported in Amazon EMR versions 5.30.0 and 6.1.0 and later. For more information, see Using an auto-termination policy.
\nCreates or updates an auto-termination policy for an Amazon EMR cluster. An auto-termination policy defines the amount of idle time in seconds after which a cluster automatically terminates. For alternative cluster termination options, see Control cluster termination.
" } }, "com.amazonaws.emr#PutAutoTerminationPolicyInput": { @@ -6469,7 +6469,7 @@ "Applications": { "target": "com.amazonaws.emr#ApplicationList", "traits": { - "smithy.api#documentation": "Applies to Amazon EMR releases 4.0 and later. A case-insensitive list of applications\n for Amazon EMR to install and configure when launching the cluster. For a list of\n applications available for each Amazon EMR release version, see the Amazon EMR Release\n Guide.
" + "smithy.api#documentation": "Applies to Amazon EMR releases 4.0 and later. A case-insensitive list of applications\n for Amazon EMR to install and configure when launching the cluster. For a list of\n applications available for each Amazon EMR release version, see the Amazon EMRRelease\n Guide.
" } }, "Configurations": { @@ -6481,7 +6481,7 @@ "VisibleToAllUsers": { "target": "com.amazonaws.emr#Boolean", "traits": { - "smithy.api#documentation": "Set this value to true
so that IAM principals in the Amazon Web Services account associated with the cluster can perform EMR actions on the cluster that their IAM policies allow. This value defaults to true
for clusters created using the EMR API or the CLI create-cluster command.
When set to false
, only the IAM principal that created the cluster and the Amazon Web Services account root user can perform EMR actions for the cluster, regardless of the IAM permissions policies attached to other IAM principals. For more information, see Understanding the EMR Cluster VisibleToAllUsers Setting in the Amazon EMRManagement Guide.
The VisibleToAllUsers parameter is no longer supported. By default, the value is set to true
. Setting it to false
now has no effect.
Set this value to true
so that IAM principals in the Amazon Web Services account associated with the cluster can perform EMR actions on the cluster that their IAM policies allow. This value defaults to true
for clusters created using the EMR API or the CLI create-cluster command.
When set to false
, only the IAM principal that created the cluster and the Amazon Web Services account root user can perform EMR actions for the cluster, regardless of the IAM permissions policies attached to other IAM principals. For more information, see Understanding the EMR Cluster VisibleToAllUsers Setting in the Amazon EMRManagement Guide.
Sets the Cluster$VisibleToAllUsers value for an EMR cluster. When true
, IAM principals in the\n Amazon Web Services account can perform EMR cluster actions that their IAM policies allow. When false
, only the IAM principal that created the cluster and the Amazon Web Services account root user can perform EMR actions on the cluster, regardless of IAM permissions policies attached to other IAM principals.
This action works on running clusters. When you create a cluster, use the RunJobFlowInput$VisibleToAllUsers parameter.
\nFor more information, see Understanding the EMR Cluster VisibleToAllUsers Setting in the Amazon EMRManagement Guide.
" + "smithy.api#documentation": "The SetVisibleToAllUsers parameter is no longer supported. Your cluster may be visible to all users in your account.\n To restrict cluster access using an IAM policy, see Identity and Access Management for EMR.\n
\nSets the Cluster$VisibleToAllUsers value for an EMR cluster. When true
, IAM principals in the\n Amazon Web Services account can perform EMR cluster actions that their IAM policies allow. When false
, only the IAM principal that created the cluster and the Amazon Web Services account root user can perform EMR actions on the cluster, regardless of IAM permissions policies attached to other IAM principals.
This action works on running clusters. When you create a cluster, use the RunJobFlowInput$VisibleToAllUsers parameter.
\nFor more information, see Understanding the EMR Cluster VisibleToAllUsers Setting in the Amazon EMRManagement Guide.
" } }, "com.amazonaws.emr#SetVisibleToAllUsersInput": { @@ -7033,7 +7033,7 @@ "BlockDurationMinutes": { "target": "com.amazonaws.emr#WholeNumber", "traits": { - "smithy.api#documentation": "The defined duration for Spot Instances (also known as Spot blocks) in minutes. When\n specified, the Spot Instance does not terminate before the defined duration expires, and\n defined duration pricing for Spot Instances applies. Valid values are 60, 120, 180, 240,\n 300, or 360. The duration period starts as soon as a Spot Instance receives its instance\n ID. At the end of the duration, Amazon EC2 marks the Spot Instance for termination and\n provides a Spot Instance termination notice, which gives the instance a two-minute warning\n before it terminates.
" + "smithy.api#documentation": "The defined duration for Spot Instances (also known as Spot blocks) in minutes. When\n specified, the Spot Instance does not terminate before the defined duration expires, and\n defined duration pricing for Spot Instances applies. Valid values are 60, 120, 180, 240,\n 300, or 360. The duration period starts as soon as a Spot Instance receives its instance\n ID. At the end of the duration, Amazon EC2 marks the Spot Instance for termination and\n provides a Spot Instance termination notice, which gives the instance a two-minute warning\n before it terminates.
\nSpot Instances with a defined duration (also known as Spot blocks) are no longer available to new customers from July 1, 2021. For customers who have previously used the feature, we will continue to support Spot Instances with a defined duration until December 31, 2022.\n
\nThe launch specification for Spot Instances in the instance fleet, which determines the\n defined duration, provisioning timeout behavior, and allocation strategy.
\nThe instance fleet configuration is available only in Amazon EMR versions 4.8.0 and\n later, excluding 5.0.x versions. Spot Instance allocation strategy is available in\n Amazon EMR version 5.12.1 and later.
\nThe launch specification for Spot Instances in the instance fleet, which determines the\n defined duration, provisioning timeout behavior, and allocation strategy.
\nThe instance fleet configuration is available only in Amazon EMR versions 4.8.0 and\n later, excluding 5.0.x versions. Spot Instance allocation strategy is available in\n Amazon EMR version 5.12.1 and later.
\nSpot Instances with a defined duration (also known as Spot blocks) are no longer available to new customers from July 1, 2021. For customers who have previously used the feature, we will continue to support Spot Instances with a defined duration until December 31, 2022.\n
\nA user-defined key, which is the minimum required information for a valid tag. For more\n information, see Tag .
" + "smithy.api#documentation": "A user-defined key, which is the minimum required information for a valid tag. For more\n information, see Tag.
" } }, "Value": { @@ -8079,7 +8079,7 @@ "VolumeType": { "target": "com.amazonaws.emr#String", "traits": { - "smithy.api#documentation": "The volume type. Volume types supported are gp2, io1, standard.
", + "smithy.api#documentation": "The volume type. Volume types supported are gp2, io1, and standard.
", "smithy.api#required": {} } }, diff --git a/codegen/sdk-codegen/aws-models/fis.json b/codegen/sdk-codegen/aws-models/fis.json index 012a1ce603f83..9d2e3a7dfed85 100644 --- a/codegen/sdk-codegen/aws-models/fis.json +++ b/codegen/sdk-codegen/aws-models/fis.json @@ -182,7 +182,7 @@ "type": "structure", "members": { "resourceType": { - "target": "com.amazonaws.fis#TargetResourceType", + "target": "com.amazonaws.fis#TargetResourceTypeId", "traits": { "smithy.api#documentation": "The resource type of the target.
" } @@ -412,9 +412,9 @@ "type": "structure", "members": { "resourceType": { - "target": "com.amazonaws.fis#ResourceType", + "target": "com.amazonaws.fis#TargetResourceTypeId", "traits": { - "smithy.api#documentation": "The Amazon Web Services resource type. The resource type must be supported for the specified action.
", + "smithy.api#documentation": "The resource type. The resource type must be supported for the specified action.
", "smithy.api#required": {} } }, @@ -442,6 +442,12 @@ "smithy.api#documentation": "Scopes the identified resources to a specific count of the resources at random, or a percentage of the resources. All identified resources are included in the target.
\nALL - Run the action on all identified targets. This is the default.
\nCOUNT(n) - Run the action on the specified number of targets, chosen from the identified targets at random.\n For example, COUNT(1) selects one of the targets.
\nPERCENT(n) - Run the action on the specified percentage of targets, chosen from the identified targets \n at random. For example, PERCENT(25) selects 25% of the targets.
\nThe resource type parameters.
" + } } }, "traits": { @@ -972,7 +978,7 @@ "type": "structure", "members": { "resourceType": { - "target": "com.amazonaws.fis#ResourceType", + "target": "com.amazonaws.fis#TargetResourceTypeId", "traits": { "smithy.api#documentation": "The resource type.
" } @@ -1000,6 +1006,12 @@ "traits": { "smithy.api#documentation": "Scopes the identified resources to a specific count or percentage.
" } + }, + "parameters": { + "target": "com.amazonaws.fis#ExperimentTargetParameterMap", + "traits": { + "smithy.api#documentation": "The resource type parameters.
" + } } }, "traits": { @@ -1077,6 +1089,34 @@ "smithy.api#pattern": "^[\\S]+$" } }, + "com.amazonaws.fis#ExperimentTargetParameterMap": { + "type": "map", + "key": { + "target": "com.amazonaws.fis#ExperimentTargetParameterName" + }, + "value": { + "target": "com.amazonaws.fis#ExperimentTargetParameterValue" + } + }, + "com.amazonaws.fis#ExperimentTargetParameterName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 64 + }, + "smithy.api#pattern": "^[\\S]+$" + } + }, + "com.amazonaws.fis#ExperimentTargetParameterValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1024 + } + } + }, "com.amazonaws.fis#ExperimentTargetSelectionMode": { "type": "string", "traits": { @@ -1374,7 +1414,7 @@ "type": "structure", "members": { "resourceType": { - "target": "com.amazonaws.fis#ResourceType", + "target": "com.amazonaws.fis#TargetResourceTypeId", "traits": { "smithy.api#documentation": "The resource type.
" } @@ -1402,6 +1442,12 @@ "traits": { "smithy.api#documentation": "Scopes the identified resources to a specific count or percentage.
" } + }, + "parameters": { + "target": "com.amazonaws.fis#ExperimentTemplateTargetParameterMap", + "traits": { + "smithy.api#documentation": "The resource type parameters.
" + } } }, "traits": { @@ -1507,6 +1553,35 @@ "smithy.api#pattern": "^[\\S]+$" } }, + "com.amazonaws.fis#ExperimentTemplateTargetParameterMap": { + "type": "map", + "key": { + "target": "com.amazonaws.fis#ExperimentTemplateTargetParameterName" + }, + "value": { + "target": "com.amazonaws.fis#ExperimentTemplateTargetParameterValue" + } + }, + "com.amazonaws.fis#ExperimentTemplateTargetParameterName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 64 + }, + "smithy.api#pattern": "^[\\S]+$" + } + }, + "com.amazonaws.fis#ExperimentTemplateTargetParameterValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1024 + }, + "smithy.api#pattern": "^[\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]+$" + } + }, "com.amazonaws.fis#ExperimentTemplateTargetSelectionMode": { "type": "string", "traits": { @@ -1551,6 +1626,9 @@ { "target": "com.amazonaws.fis#GetExperimentTemplate" }, + { + "target": "com.amazonaws.fis#GetTargetResourceType" + }, { "target": "com.amazonaws.fis#ListActions" }, @@ -1563,6 +1641,9 @@ { "target": "com.amazonaws.fis#ListTagsForResource" }, + { + "target": "com.amazonaws.fis#ListTargetResourceTypes" + }, { "target": "com.amazonaws.fis#StartExperiment" }, @@ -1727,6 +1808,55 @@ } } }, + "com.amazonaws.fis#GetTargetResourceType": { + "type": "operation", + "input": { + "target": "com.amazonaws.fis#GetTargetResourceTypeRequest" + }, + "output": { + "target": "com.amazonaws.fis#GetTargetResourceTypeResponse" + }, + "errors": [ + { + "target": "com.amazonaws.fis#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.fis#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "Gets information about the specified resource type.
", + "smithy.api#http": { + "method": "GET", + "uri": "/targetResourceTypes/{resourceType}", + "code": 200 + } + } + }, + "com.amazonaws.fis#GetTargetResourceTypeRequest": { + "type": "structure", + "members": { + "resourceType": { + "target": "com.amazonaws.fis#TargetResourceTypeId", + "traits": { + "smithy.api#documentation": "The resource type.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.fis#GetTargetResourceTypeResponse": { + "type": "structure", + "members": { + "targetResourceType": { + "target": "com.amazonaws.fis#TargetResourceType", + "traits": { + "smithy.api#documentation": "Information about the resource type.
" + } + } + } + }, "com.amazonaws.fis#LastUpdateTime": { "type": "timestamp" }, @@ -1990,6 +2120,79 @@ } } }, + "com.amazonaws.fis#ListTargetResourceTypes": { + "type": "operation", + "input": { + "target": "com.amazonaws.fis#ListTargetResourceTypesRequest" + }, + "output": { + "target": "com.amazonaws.fis#ListTargetResourceTypesResponse" + }, + "errors": [ + { + "target": "com.amazonaws.fis#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "Lists the target resource types.
", + "smithy.api#http": { + "method": "GET", + "uri": "/targetResourceTypes", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults" + } + } + }, + "com.amazonaws.fis#ListTargetResourceTypesMaxResults": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 100 + } + } + }, + "com.amazonaws.fis#ListTargetResourceTypesRequest": { + "type": "structure", + "members": { + "maxResults": { + "target": "com.amazonaws.fis#ListTargetResourceTypesMaxResults", + "traits": { + "smithy.api#box": {}, + "smithy.api#documentation": "The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken
value.
The token for the next page of results.
", + "smithy.api#httpQuery": "nextToken" + } + } + } + }, + "com.amazonaws.fis#ListTargetResourceTypesResponse": { + "type": "structure", + "members": { + "targetResourceTypes": { + "target": "com.amazonaws.fis#TargetResourceTypeSummaryList", + "traits": { + "smithy.api#documentation": "The target resource types.
" + } + }, + "nextToken": { + "target": "com.amazonaws.fis#NextToken", + "traits": { + "smithy.api#documentation": "The token to use to retrieve the next page of results. This value is null
when there are no more results to return.
The resource type.
" + } + }, + "description": { + "target": "com.amazonaws.fis#TargetResourceTypeDescription", + "traits": { + "smithy.api#documentation": "A description of the resource type.
" + } + }, + "parameters": { + "target": "com.amazonaws.fis#TargetResourceTypeParameterMap", + "traits": { + "smithy.api#documentation": "The parameters for the resource type.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Describes a resource type.
" + } + }, + "com.amazonaws.fis#TargetResourceTypeDescription": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 512 + }, + "smithy.api#pattern": "^[\\s\\S]+$" + } + }, + "com.amazonaws.fis#TargetResourceTypeId": { "type": "string", "traits": { "smithy.api#length": { @@ -2305,6 +2534,85 @@ "smithy.api#pattern": "^[\\S]+$" } }, + "com.amazonaws.fis#TargetResourceTypeParameter": { + "type": "structure", + "members": { + "description": { + "target": "com.amazonaws.fis#TargetResourceTypeParameterDescription", + "traits": { + "smithy.api#documentation": "A description of the parameter.
" + } + }, + "required": { + "target": "com.amazonaws.fis#TargetResourceTypeParameterRequired", + "traits": { + "smithy.api#box": {}, + "smithy.api#documentation": "Indicates whether the parameter is required.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Describes the parameters for a resource type. Use parameters to determine which tasks are\n identified during target resolution.
" + } + }, + "com.amazonaws.fis#TargetResourceTypeParameterDescription": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 512 + }, + "smithy.api#pattern": "^[\\s\\S]+$" + } + }, + "com.amazonaws.fis#TargetResourceTypeParameterMap": { + "type": "map", + "key": { + "target": "com.amazonaws.fis#TargetResourceTypeParameterName" + }, + "value": { + "target": "com.amazonaws.fis#TargetResourceTypeParameter" + } + }, + "com.amazonaws.fis#TargetResourceTypeParameterName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 64 + }, + "smithy.api#pattern": "^[\\S]+$" + } + }, + "com.amazonaws.fis#TargetResourceTypeParameterRequired": { + "type": "boolean" + }, + "com.amazonaws.fis#TargetResourceTypeSummary": { + "type": "structure", + "members": { + "resourceType": { + "target": "com.amazonaws.fis#TargetResourceTypeId", + "traits": { + "smithy.api#documentation": "The resource type.
" + } + }, + "description": { + "target": "com.amazonaws.fis#TargetResourceTypeDescription", + "traits": { + "smithy.api#documentation": "A description of the resource type.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Describes a resource type.
" + } + }, + "com.amazonaws.fis#TargetResourceTypeSummaryList": { + "type": "list", + "member": { + "target": "com.amazonaws.fis#TargetResourceTypeSummary" + } + }, "com.amazonaws.fis#UntagResource": { "type": "operation", "input": { @@ -2506,9 +2814,9 @@ "type": "structure", "members": { "resourceType": { - "target": "com.amazonaws.fis#ResourceType", + "target": "com.amazonaws.fis#TargetResourceTypeId", "traits": { - "smithy.api#documentation": "The Amazon Web Services resource type. The resource type must be supported for the specified action.
", + "smithy.api#documentation": "The resource type. The resource type must be supported for the specified action.
", "smithy.api#required": {} } }, @@ -2536,6 +2844,12 @@ "smithy.api#documentation": "Scopes the identified resources to a specific count or percentage.
", "smithy.api#required": {} } + }, + "parameters": { + "target": "com.amazonaws.fis#ExperimentTemplateTargetParameterMap", + "traits": { + "smithy.api#documentation": "The resource type parameters.
" + } } }, "traits": { diff --git a/codegen/sdk-codegen/aws-models/glue.json b/codegen/sdk-codegen/aws-models/glue.json index d20b258a4635c..0d32257f47c1a 100644 --- a/codegen/sdk-codegen/aws-models/glue.json +++ b/codegen/sdk-codegen/aws-models/glue.json @@ -2384,7 +2384,7 @@ "DataFormat": { "target": "com.amazonaws.glue#DataFormat", "traits": { - "smithy.api#documentation": "The data format of the schema definition. Currently AVRO
and JSON
are supported.
The data format of the schema definition. Currently AVRO
, JSON
and PROTOBUF
are supported.
The data format of the schema definition. Currently AVRO
and JSON
are supported.
The data format of the schema definition. Currently AVRO
, JSON
and PROTOBUF
are supported.
The data format of the schema definition. Currently AVRO
and JSON
are supported.
The data format of the schema definition. Currently AVRO
, JSON
and PROTOBUF
are supported.
The data format of the schema definition. Currently only AVRO
and JSON
are supported.
The data format of the schema definition. Currently AVRO
, JSON
and PROTOBUF
are supported.
The data format of the schema definition. Currently AVRO
and JSON
are supported.
The data format of the schema definition. Currently AVRO
, JSON
and PROTOBUF
are supported.
The data format of the schema definition. Currently AVRO
and JSON
are supported.
The data format of the schema definition. Currently AVRO
, JSON
and PROTOBUF
are supported.
The transaction at which to do the write.
", - "smithy.api#required": {} + "smithy.api#documentation": "The transaction at which to do the write.
" } }, "WriteOperations": { diff --git a/codegen/sdk-codegen/aws-models/marketplace-metering.json b/codegen/sdk-codegen/aws-models/marketplace-metering.json index c3ed9984900ee..15abd8c1f5273 100644 --- a/codegen/sdk-codegen/aws-models/marketplace-metering.json +++ b/codegen/sdk-codegen/aws-models/marketplace-metering.json @@ -31,6 +31,21 @@ "shapes": { "com.amazonaws.marketplacemetering#AWSMPMeteringService": { "type": "service", + "traits": { + "aws.api#service": { + "sdkId": "Marketplace Metering", + "arnNamespace": "aws-marketplace", + "cloudFormationName": "MarketplaceMetering", + "cloudTrailEventSource": "marketplacemetering.amazonaws.com", + "endpointPrefix": "metering.marketplace" + }, + "aws.auth#sigv4": { + "name": "aws-marketplace" + }, + "aws.protocols#awsJson1_1": {}, + "smithy.api#documentation": "This reference provides descriptions of the low-level AWS Marketplace Metering Service\n API.
\nAWS Marketplace sellers can use this API to submit usage data for custom usage\n dimensions.
\nFor information on the permissions you need to use this API, see AWS Marketplace metering and entitlement API permissions in the\n AWS Marketplace Seller Guide.\n
\n\n Submitting Metering Records\n
\n\n MeterUsage - Submits the metering record for an AWS\n Marketplace product. MeterUsage
is called from an EC2 instance or a\n container running on EKS or ECS.
\n BatchMeterUsage - Submits the metering record for a set of\n customers. BatchMeterUsage
is called from a software-as-a-service\n (SaaS) application.
\n Accepting New Customers\n
\n\n ResolveCustomer - Called by a SaaS application during the\n registration process. When a buyer visits your website during the registration\n process, the buyer submits a Registration Token through the browser. The\n Registration Token is resolved through this API to obtain a\n CustomerIdentifier
\n \n along with the CustomerAWSAccountId
and\n ProductCode
.
\n Entitlement and Metering for Paid Container Products\n
\nPaid container software products sold through AWS Marketplace must integrate\n with the AWS Marketplace Metering Service and call the\n RegisterUsage
operation for software entitlement and metering.\n Free and BYOL products for Amazon ECS or Amazon EKS aren't required to call\n RegisterUsage
, but you can do so if you want to receive usage\n data in your seller reports. For more information on using the\n RegisterUsage
operation, see Container-Based Products.
\n BatchMeterUsage
API calls are captured by AWS CloudTrail. You can use\n Cloudtrail to verify that the SaaS metering records that you sent are accurate by\n searching for records with the eventName
of BatchMeterUsage
.\n You can also use CloudTrail to audit records over time. For more information, see the\n \n AWS CloudTrail User Guide.\n
This reference provides descriptions of the low-level AWS Marketplace Metering\n Service API.
\nAWS Marketplace sellers can use this API to submit usage data for custom usage\n dimensions.
\nFor information on the permissions you need to use this API, see\n AWS Marketing metering and entitlement API permissions in the AWS Marketplace Seller Guide. \n
\n\n Submitting Metering Records\n
\n\n MeterUsage- Submits the metering record for a Marketplace\n product. MeterUsage is called from an EC2 instance or a container running on EKS\n or ECS.
\n\n BatchMeterUsage- Submits the metering record for a set of\n customers. BatchMeterUsage is called from a software-as-a-service (SaaS)\n application.
\n\n Accepting New Customers\n
\n\n ResolveCustomer- Called by a SaaS application during the\n registration process. When a buyer visits your website during the registration\n process, the buyer submits a Registration Token through the browser. The\n Registration Token is resolved through this API to obtain a CustomerIdentifier\n and Product Code.
\n\n Entitlement and Metering for Paid Container Products\n
\nPaid container software products sold through AWS Marketplace must\n integrate with the AWS Marketplace Metering Service and call the RegisterUsage\n operation for software entitlement and metering. Free and BYOL products for\n Amazon ECS or Amazon EKS aren't required to call RegisterUsage, but you can do\n so if you want to receive usage data in your seller reports. For more\n information on using the RegisterUsage operation, see Container-Based Products.
\nBatchMeterUsage API calls are captured by AWS CloudTrail. You can use Cloudtrail to\n verify that the SaaS metering records that you sent are accurate by searching for\n records with the eventName of BatchMeterUsage. You can also use CloudTrail to audit\n records over time. For more information, see the \n AWS CloudTrail User Guide\n .
", - "smithy.api#title": "AWSMarketplace Metering" - } + ] }, "com.amazonaws.marketplacemetering#AllocatedUsageQuantity": { "type": "integer", @@ -110,7 +110,7 @@ } ], "traits": { - "smithy.api#documentation": "BatchMeterUsage is called from a SaaS application listed on the AWS Marketplace to\n post metering records for a set of customers.
\nFor identical requests, the API is idempotent; requests can be retried with the\n same records or a subset of the input records.
\nEvery request to BatchMeterUsage is for one product. If you need to meter usage for\n multiple products, you must make multiple calls to BatchMeterUsage.
\nBatchMeterUsage can process up to 25 UsageRecords at a time.
\nA UsageRecord can optionally include multiple usage allocations, to provide customers\n with usagedata split into buckets by tags that you define (or allow the customer to \n define).
\nBatchMeterUsage requests must be less than 1MB in size.
" + "smithy.api#documentation": "\n BatchMeterUsage
is called from a SaaS application listed on AWS\n Marketplace to post metering records for a set of customers.
For identical requests, the API is idempotent; requests can be retried with the same\n records or a subset of the input records.
\nEvery request to BatchMeterUsage
is for one product. If you need to meter\n usage for multiple products, you must make multiple calls to\n BatchMeterUsage
.
Usage records are expected to be submitted as quickly as possible after the event that\n is being recorded, and are not accepted more than 6 hours after the event.
\n\n BatchMeterUsage
can process up to 25 UsageRecords
at a\n time.
A UsageRecord
can optionally include multiple usage allocations, to\n provide customers with usage data split into buckets by tags that you define (or allow\n the customer to define).
\n BatchMeterUsage
returns a list of UsageRecordResult
objects,\n showing the result for each UsageRecord
, as well as a list of\n UnprocessedRecords
, indicating errors in the service side that you\n should retry.
\n BatchMeterUsage
requests must be less than 1MB in size.
For an example of using BatchMeterUsage
, see BatchMeterUsage code example in the AWS Marketplace Seller\n Guide.
The set of UsageRecords to submit. BatchMeterUsage accepts up to 25 UsageRecords at\n a time.
", + "smithy.api#documentation": "The set of UsageRecords
to submit. BatchMeterUsage
accepts\n up to 25 UsageRecords
at a time.
Product code is used to uniquely identify a product in AWS Marketplace. The product\n code should be the same as the one used during the publishing of a new\n product.
", + "smithy.api#documentation": "Product code is used to uniquely identify a product in AWS Marketplace. The product\n code should be the same as the one used during the publishing of a new product.
", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "A BatchMeterUsageRequest contains UsageRecords, which indicate quantities of usage\n within your application.
" + "smithy.api#documentation": "A BatchMeterUsageRequest
contains UsageRecords
, which\n indicate quantities of usage within your application.
Contains all UsageRecords processed by BatchMeterUsage. These records were either\n honored by AWS Marketplace Metering Service or were invalid.
" + "smithy.api#documentation": "Contains all UsageRecords
processed by BatchMeterUsage
.\n These records were either honored by AWS Marketplace Metering Service or were invalid.\n Invalid records should be fixed before being resubmitted.
Contains all UsageRecords that were not processed by BatchMeterUsage. This is a\n list of UsageRecords. You can retry the failed request by making another BatchMeterUsage\n call with this list as input in the BatchMeterUsageRequest.
" + "smithy.api#documentation": "Contains all UsageRecords
that were not processed by\n BatchMeterUsage
. This is a list of UsageRecords
. You can\n retry the failed request by making another BatchMeterUsage
call with this\n list as input in the BatchMeterUsageRequest
.
Contains the UsageRecords processed by BatchMeterUsage and any records that have\n failed due to transient error.
" + "smithy.api#documentation": "Contains the UsageRecords
processed by BatchMeterUsage
and\n any records that have failed due to transient error.
A metering record has already been emitted by the same EC2 instance, ECS task, or\n EKS pod for the given {usageDimension, timestamp} with a different\n usageQuantity.
", + "smithy.api#documentation": "A metering record has already been emitted by the same EC2 instance, ECS task, or EKS\n pod for the given {usageDimension
, timestamp
} with a different\n usageQuantity
.
The submitted registration token has expired. This can happen if the buyer's\n browser takes too long to redirect to your page, the buyer has resubmitted the\n registration token, or your application has held on to the registration token for too\n long. Your SaaS registration website should redeem this token as soon as it is submitted\n by the buyer's browser.
", + "smithy.api#documentation": "The submitted registration token has expired. This can happen if the buyer's browser\n takes too long to redirect to your page, the buyer has resubmitted the registration\n token, or your application has held on to the registration token for too long. Your SaaS\n registration website should redeem this token as soon as it is submitted by the buyer's\n browser.
", "smithy.api#error": "client" } }, @@ -239,7 +249,7 @@ } }, "traits": { - "smithy.api#documentation": "You have metered usage for a CustomerIdentifier that does not exist.
", + "smithy.api#documentation": "You have metered usage for a CustomerIdentifier
that does not\n exist.
RegisterUsage must be called in the same AWS Region the ECS task was launched in.\n This prevents a container from hardcoding a Region (e.g. withRegion(“us-east-1”) when\n calling RegisterUsage.
", + "smithy.api#documentation": "\n RegisterUsage
must be called in the same AWS Region the ECS task was\n launched in. This prevents a container from hardcoding a Region (e.g.\n withRegion(“us-east-1”) when calling RegisterUsage
.
The usage allocation objects are invalid, or the number of allocations is greater\n than 500 for a single usage record.
", + "smithy.api#documentation": "The usage allocation objects are invalid, or the number of allocations is greater than\n 500 for a single usage record.
", "smithy.api#error": "client" } }, @@ -335,7 +345,7 @@ } }, "traits": { - "smithy.api#documentation": "The usage dimension does not match one of the UsageDimensions associated with\n products.
", + "smithy.api#documentation": "The usage dimension does not match one of the UsageDimensions
associated\n with products.
API to emit metering records. For identical requests, the API is idempotent. It\n simply returns the metering record ID.
\nMeterUsage is authenticated on the buyer's AWS account using credentials from the\n EC2 instance, ECS task, or EKS pod.
\nMeterUsage can optionally include multiple usage allocations, to provide customers\n with usage data split into buckets by tags that you define (or allow the customer to\n define).
" + "smithy.api#documentation": "API to emit metering records. For identical requests, the API is idempotent. It simply\n returns the metering record ID.
\n\n MeterUsage
is authenticated on the buyer's AWS account using credentials\n from the EC2 instance, ECS task, or EKS pod.
\n MeterUsage
can optionally include multiple usage allocations, to provide\n customers with usage data split into buckets by tags that you define (or allow the\n customer to define).
Usage records are expected to be submitted as quickly as possible after the event that\n is being recorded, and are not accepted more than 6 hours after the event.
" } }, "com.amazonaws.marketplacemetering#MeterUsageRequest": { @@ -389,14 +399,14 @@ "ProductCode": { "target": "com.amazonaws.marketplacemetering#ProductCode", "traits": { - "smithy.api#documentation": "Product code is used to uniquely identify a product in AWS Marketplace. The product\n code should be the same as the one used during the publishing of a new\n product.
", + "smithy.api#documentation": "Product code is used to uniquely identify a product in AWS Marketplace. The product\n code should be the same as the one used during the publishing of a new product.
", "smithy.api#required": {} } }, "Timestamp": { "target": "com.amazonaws.marketplacemetering#Timestamp", "traits": { - "smithy.api#documentation": "Timestamp, in UTC, for which the usage is being reported. Your application can\n meter usage for up to one hour in the past. Make sure the timestamp value is not before\n the start of the software usage.
", + "smithy.api#documentation": "Timestamp, in UTC, for which the usage is being reported. Your application can meter\n usage for up to one hour in the past. Make sure the timestamp
value is not\n before the start of the software usage.
Consumption value for the hour. Defaults to 0
if not\n specified.
Consumption value for the hour. Defaults to 0
if not specified.
Checks whether you have the permissions required for the action, but does not make\n the request. If you have the permissions, the request returns DryRunOperation;\n otherwise, it returns UnauthorizedException. Defaults to false
if not\n specified.
Checks whether you have the permissions required for the action, but does not make the\n request. If you have the permissions, the request returns DryRunOperation
;\n otherwise, it returns UnauthorizedException
. Defaults to false
\n if not specified.
The set of UsageAllocations to submit.
\nThe sum of all UsageAllocation quantities must equal the\n UsageQuantity of the MeterUsage request, and each UsageAllocation must have a\n unique set of tags (include no tags).
" + "smithy.api#documentation": "The set of UsageAllocations
to submit.
The sum of all UsageAllocation
quantities must equal the\n UsageQuantity
of the MeterUsage
request, and each\n UsageAllocation
must have a unique set of tags (include no\n tags).
Paid container software products sold through AWS Marketplace must integrate with\n the AWS Marketplace Metering Service and call the RegisterUsage operation for software\n entitlement and metering. Free and BYOL products for Amazon ECS or Amazon EKS aren't\n required to call RegisterUsage, but you may choose to do so if you would like to receive\n usage data in your seller reports. The sections below explain the behavior of\n RegisterUsage. RegisterUsage performs two primary functions: metering and\n entitlement.
\n\n\n Entitlement: RegisterUsage allows you to verify that the\n customer running your paid software is subscribed to your product on AWS\n Marketplace, enabling you to guard against unauthorized use. Your container\n image that integrates with RegisterUsage is only required to guard against\n unauthorized use at container startup, as such a\n CustomerNotSubscribedException/PlatformNotSupportedException will only be thrown\n on the initial call to RegisterUsage. Subsequent calls from the same Amazon ECS\n task instance (e.g. task-id) or Amazon EKS pod will not throw a\n CustomerNotSubscribedException, even if the customer unsubscribes while the\n Amazon ECS task or Amazon EKS pod is still running.
\n\n Metering: RegisterUsage meters software use per ECS task,\n per hour, or per pod for Amazon EKS with usage prorated to the second. A minimum\n of 1 minute of usage applies to tasks that are short lived. For example, if a\n customer has a 10 node Amazon ECS or Amazon EKS cluster and a service configured\n as a Daemon Set, then Amazon ECS or Amazon EKS will launch a task on all 10\n cluster nodes and the customer will be charged: (10 * hourly_rate). Metering for\n software use is automatically handled by the AWS Marketplace Metering Control\n Plane -- your software is not required to perform any metering specific actions,\n other than call RegisterUsage once for metering of software use to commence. The\n AWS Marketplace Metering Control Plane will also continue to bill customers for\n running ECS tasks and Amazon EKS pods, regardless of the customers subscription\n state, removing the need for your software to perform entitlement checks at\n runtime.
\nPaid container software products sold through AWS Marketplace must integrate with the\n AWS Marketplace Metering Service and call the RegisterUsage
operation for\n software entitlement and metering. Free and BYOL products for Amazon ECS or Amazon EKS\n aren't required to call RegisterUsage
, but you may choose to do so if you\n would like to receive usage data in your seller reports. The sections below explain the\n behavior of RegisterUsage
. RegisterUsage
performs two primary\n functions: metering and entitlement.
\n Entitlement: RegisterUsage
allows you to\n verify that the customer running your paid software is subscribed to your\n product on AWS Marketplace, enabling you to guard against unauthorized use. Your\n container image that integrates with RegisterUsage
is only required\n to guard against unauthorized use at container startup, as such a\n CustomerNotSubscribedException
or\n PlatformNotSupportedException
will only be thrown on the\n initial call to RegisterUsage
. Subsequent calls from the same\n Amazon ECS task instance (e.g. task-id) or Amazon EKS pod will not throw a\n CustomerNotSubscribedException
, even if the customer\n unsubscribes while the Amazon ECS task or Amazon EKS pod is still\n running.
\n Metering: RegisterUsage
meters software use\n per ECS task, per hour, or per pod for Amazon EKS with usage prorated to the\n second. A minimum of 1 minute of usage applies to tasks that are short lived.\n For example, if a customer has a 10 node Amazon ECS or Amazon EKS cluster and a\n service configured as a Daemon Set, then Amazon ECS or Amazon EKS will launch a\n task on all 10 cluster nodes and the customer will be charged: (10 *\n hourly_rate). Metering for software use is automatically handled by the AWS\n Marketplace Metering Control Plane -- your software is not required to perform\n any metering specific actions, other than call RegisterUsage
once\n for metering of software use to commence. The AWS Marketplace Metering Control\n Plane will also continue to bill customers for running ECS tasks and Amazon EKS\n pods, regardless of the customers subscription state, removing the need for your\n software to perform entitlement checks at runtime.
Product code is used to uniquely identify a product in AWS Marketplace. The product\n code should be the same as the one used during the publishing of a new\n product.
", + "smithy.api#documentation": "Product code is used to uniquely identify a product in AWS Marketplace. The product\n code should be the same as the one used during the publishing of a new product.
", "smithy.api#required": {} } }, @@ -534,7 +544,7 @@ "Nonce": { "target": "com.amazonaws.marketplacemetering#Nonce", "traits": { - "smithy.api#documentation": "(Optional) To scope down the registration to a specific running software instance\n and guard against replay attacks.
" + "smithy.api#documentation": "(Optional) To scope down the registration to a specific running software instance and\n guard against replay attacks.
" } } } @@ -582,7 +592,7 @@ } ], "traits": { - "smithy.api#documentation": "ResolveCustomer is called by a SaaS application during the registration process.\n When a buyer visits your website during the registration process, the buyer submits a\n registration token through their browser. The registration token is resolved through\n this API to obtain a CustomerIdentifier and product code.
" + "smithy.api#documentation": "\n ResolveCustomer
is called by a SaaS application during the registration\n process. When a buyer visits your website during the registration process, the buyer\n submits a registration token through their browser. The registration token is resolved\n through this API to obtain a CustomerIdentifier
\n along with the\n CustomerAWSAccountId
and\n ProductCode
.
The API needs to called from the seller account id used to publish the SaaS\n application to successfully resolve the token.
\nFor an example of using ResolveCustomer
, see ResolveCustomer code example in the AWS Marketplace Seller\n Guide.
When a buyer visits your website during the registration process, the buyer submits\n a registration token through the browser. The registration token is resolved to obtain a\n CustomerIdentifier and product code.
", + "smithy.api#documentation": "When a buyer visits your website during the registration process, the buyer submits a\n registration token through the browser. The registration token is resolved to obtain a\n CustomerIdentifier
\n along with the\n CustomerAWSAccountId
\n and\n ProductCode
.
Contains input to the ResolveCustomer operation.
" + "smithy.api#documentation": "Contains input to the ResolveCustomer
operation.
The CustomerIdentifier is used to identify an individual customer in your\n application. Calls to BatchMeterUsage require CustomerIdentifiers for each\n UsageRecord.
" + "smithy.api#documentation": "The CustomerIdentifier
is used to identify an individual customer in your\n application. Calls to BatchMeterUsage
require\n CustomerIdentifiers
for each UsageRecord
.
The product code is returned to confirm that the buyer is registering for your\n product. Subsequent BatchMeterUsage calls should be made using this product\n code.
" + "smithy.api#documentation": "The product code is returned to confirm that the buyer is registering for your\n product. Subsequent BatchMeterUsage
calls should be made using this product\n code.
The CustomerAWSAccountId
provides the AWS account ID associated with the\n CustomerIdentifier
for the individual customer.
The result of the ResolveCustomer operation. Contains the CustomerIdentifier and\n product code.
" + "smithy.api#documentation": "The result of the ResolveCustomer
operation. Contains the\n CustomerIdentifier
\n \n along with the CustomerAWSAccountId
and\n ProductCode
.
One part of a key-value pair that makes up a tag. A key is a label that acts like a\n category for the specific tag values.
", + "smithy.api#documentation": "One part of a key-value pair that makes up a tag
. A key
is a\n label that acts like a category for the specific tag values.
One part of a key-value pair that makes up a tag. A value acts as a descriptor within\n a tag category (key). The value can be empty or null.
", + "smithy.api#documentation": "One part of a key-value pair that makes up a tag
. A value
\n acts as a descriptor within a tag category (key). The value can be empty or null.
Metadata assigned to an allocation. Each tag is made up of a key and a value.
" + "smithy.api#documentation": "Metadata assigned to an allocation. Each tag is made up of a key
and a\n value
.
The timestamp value passed in the meterUsage() is out of allowed range.
", + "smithy.api#documentation": "The timestamp
value passed in the UsageRecord
is out of\n allowed range.
For BatchMeterUsage
, if any of the records are outside of the allowed\n range, the entire batch is not processed. You must remove invalid records and try\n again.
The set of tags that define the bucket of usage. For the bucket of items with no\n tags, this parameter can be left out.
" + "smithy.api#documentation": "The set of tags that define the bucket of usage. For the bucket of items with no tags,\n this parameter can be left out.
" } } }, "traits": { - "smithy.api#documentation": "Usage allocations allow you to split usage into buckets by tags.
\nEach UsageAllocation indicates the usage quantity for a specific set of tags.
" + "smithy.api#documentation": "Usage allocations allow you to split usage into buckets by tags.
\nEach UsageAllocation
indicates the usage quantity for a specific set of\n tags.
Timestamp, in UTC, for which the usage is being reported.
\nYour application can meter usage for up to one hour in the past. Make sure the\n timestamp value is not before the start of the software usage.
", + "smithy.api#documentation": "Timestamp, in UTC, for which the usage is being reported.
\nYour application can meter usage for up to one hour in the past. Make sure the\n timestamp
value is not before the start of the software usage.
The CustomerIdentifier is obtained through the ResolveCustomer operation and\n represents an individual buyer in your application.
", + "smithy.api#documentation": "The CustomerIdentifier
is obtained through the\n ResolveCustomer
operation and represents an individual buyer in your\n application.
During the process of registering a product on AWS Marketplace, up to eight\n dimensions are specified. These represent different units of value in your\n application.
", + "smithy.api#documentation": "During the process of registering a product on AWS Marketplace, dimensions are\n specified. These represent different units of value in your application.
", "smithy.api#required": {} } }, @@ -790,12 +806,12 @@ "UsageAllocations": { "target": "com.amazonaws.marketplacemetering#UsageAllocations", "traits": { - "smithy.api#documentation": "The set of UsageAllocations to submit. The sum of all UsageAllocation quantities \n must equal the Quantity of the UsageRecord.
" + "smithy.api#documentation": "The set of UsageAllocations
to submit. The sum of all\n UsageAllocation
quantities must equal the Quantity of the\n UsageRecord
.
A UsageRecord indicates a quantity of usage for a given product, customer,\n dimension and time.
\nMultiple requests with the same UsageRecords as input will be deduplicated to\n prevent double charges.
" + "smithy.api#documentation": "A UsageRecord
indicates a quantity of usage for a given product,\n customer, dimension and time.
Multiple requests with the same UsageRecords
as input will be\n de-duplicated to prevent double charges.
The UsageRecord that was part of the BatchMeterUsage request.
" + "smithy.api#documentation": "The UsageRecord
that was part of the BatchMeterUsage
\n request.
The MeteringRecordId is a unique identifier for this metering event.
" + "smithy.api#documentation": "The MeteringRecordId
is a unique identifier for this metering\n event.
The UsageRecordResult Status indicates the status of an individual UsageRecord\n processed by BatchMeterUsage.
\n\n Success- The UsageRecord was accepted and honored by\n BatchMeterUsage.
\n\n CustomerNotSubscribed- The CustomerIdentifier specified is\n not subscribed to your product. The UsageRecord was not honored. Future\n UsageRecords for this customer will fail until the customer subscribes to your\n product.
\n\n DuplicateRecord- Indicates that the UsageRecord was invalid\n and not honored. A previously metered UsageRecord had the same customer,\n dimension, and time, but a different quantity.
\nThe UsageRecordResult
\n Status
indicates the status of an individual UsageRecord
\n processed by BatchMeterUsage
.
\n Success- The UsageRecord
was accepted and\n honored by BatchMeterUsage
.
\n CustomerNotSubscribed- The CustomerIdentifier
\n specified is not able to use your product. The UsageRecord
was not\n honored. There are three causes for this result:
The customer identifier is invalid.
\nThe customer identifier provided in the metering record does not have\n an active agreement or subscription with this product. Future\n UsageRecords
for this customer will fail until the\n customer subscribes to your product.
The customer's AWS account was suspended.
\n\n DuplicateRecord- Indicates that the\n UsageRecord
was invalid and not honored. A previously metered\n UsageRecord
had the same customer, dimension, and time, but a\n different quantity.
A UsageRecordResult indicates the status of a given UsageRecord processed by\n BatchMeterUsage.
" + "smithy.api#documentation": "A UsageRecordResult
indicates the status of a given\n UsageRecord
processed by BatchMeterUsage
.
Creates a recommender with the recipe (a Domain dataset group use case) you specify. \n You create recommenders for a Domain dataset group and specify the recommender's Amazon Resource Name (ARN) when you make a\n GetRecommendations\n request.\n
\n \n \n \n\n Status\n
\nA recommender can be in one of the following states:
\nCREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED
\nDELETE PENDING > DELETE IN_PROGRESS
\nTo get the recommender status, call DescribeRecommender.
\nWait until the status
of the recommender\n is ACTIVE
before asking the recommender for recommendations.
\n Related APIs\n
\n\n ListRecommenders\n
\n\n DescribeRecommender\n
\n\n UpdateRecommender\n
\n\n DeleteRecommender\n
\nCreates a recommender with the recipe (a Domain dataset group use case) you specify. \n You create recommenders for a Domain dataset group and specify the recommender's Amazon Resource Name (ARN) when you make a\n GetRecommendations\n request.\n
\n \n\n Minimum recommendation requests per second\n
\n \nWhen you create a recommender, you can configure the recommender's minimum recommendation requests per second. The minimum recommendation requests per second\n (minRecommendationRequestsPerSecond
) specifies the baseline recommendation request throughput provisioned by\n Amazon Personalize. The default minRecommendationRequestsPerSecond is 1
. A recommendation request is a single GetRecommendations
operation.\n Request throughput is measured in requests per second and Amazon Personalize uses your requests per second to derive\n your requests per hour and the price of your recommender usage.\n
\n If your requests per second increases beyond\n minRecommendationRequestsPerSecond
, Amazon Personalize auto-scales the provisioned capacity up and down,\n but never below minRecommendationRequestsPerSecond
.\n There's a short time delay while the capacity is increased that might cause loss of\n requests.
\n Your bill is the greater of either the minimum requests per hour (based on minRecommendationRequestsPerSecond)\n or the actual number of requests. The actual request throughput used is calculated as the average requests/second within a one-hour window.\n \n We recommend starting with the default minRecommendationRequestsPerSecond
, track\n your usage using Amazon CloudWatch metrics, and then increase the minRecommendationRequestsPerSecond
\n as necessary.\n
\n Status\n
\nA recommender can be in one of the following states:
\nCREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED
\nDELETE PENDING > DELETE IN_PROGRESS
\nTo get the recommender status, call DescribeRecommender.
\nWait until the status
of the recommender\n is ACTIVE
before asking the recommender for recommendations.
\n Related APIs\n
\n\n ListRecommenders\n
\n\n DescribeRecommender\n
\n\n UpdateRecommender\n
\n\n DeleteRecommender\n
\nSpecifies the exploration configuration hyperparameters, including explorationWeight
and \n explorationItemAgeCutOff
, you want to use to configure the amount of item exploration Amazon Personalize uses when\n recommending items. Provide itemExplorationConfig
data only if your recommenders generate personalized recommendations for a user\n (not popular items or similar items).
Specifies the requested minimum provisioned recommendation requests per second that\n Amazon Personalize will support.
" + } } }, "traits": { @@ -5767,7 +5773,7 @@ } }, "traits": { - "smithy.api#documentation": "Provides a summary of the properties of a recommender update. For a complete listing, call the\n DescribeRecommender API.
" + "smithy.api#documentation": "Provides a summary of the properties of a recommender update. For a complete listing, call the\n DescribeRecommender API operation.
" } }, "com.amazonaws.personalize#Recommenders": { @@ -5859,7 +5865,7 @@ "kmsKeyArn": { "target": "com.amazonaws.personalize#KmsKeyArn", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Key Management Service (KMS) key that Amazon Personalize uses to\n encrypt or decrypt the input and output files of a batch inference job.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Key Management Service (KMS) key that Amazon Personalize uses to\n encrypt or decrypt the input and output files.
" } } }, diff --git a/codegen/sdk-codegen/aws-models/rbin.json b/codegen/sdk-codegen/aws-models/rbin.json index 9c6f1bff52c17..01721089ae483 100644 --- a/codegen/sdk-codegen/aws-models/rbin.json +++ b/codegen/sdk-codegen/aws-models/rbin.json @@ -43,7 +43,7 @@ "name": "rbin" }, "aws.protocols#restJson1": {}, - "smithy.api#documentation": "This is the Recycle Bin API Reference. This documentation provides \n descriptions and syntax for each of the actions and data types in Recycle Bin.
\n \nRecycle Bin is a snapshot recovery feature that enables you to restore accidentally \n deleted snapshots. When using Recycle Bin, if your snapshots are deleted, they are retained \n in the Recycle Bin for a time period that you specify.
\n \nYou can restore a snapshot from the Recycle Bin at any time before its retention period \n expires. After you restore a snapshot from the Recycle Bin, the snapshot is removed from the \n Recycle Bin, and you can then use it in the same way you use any other snapshot in your \n account. If the retention period expires and the snapshot is not restored, the snapshot is \n permanently deleted from the Recycle Bin and is no longer available for recovery. For more \n information about Recycle Bin, see \n Recycle Bin in the Amazon EC2 User Guide.
", + "smithy.api#documentation": "This is the Recycle Bin API Reference. This documentation provides \n descriptions and syntax for each of the actions and data types in Recycle Bin.
\n \nRecycle Bin is a resource recovery feature that enables you to restore accidentally \n deleted snapshots and EBS-backed AMIs. When using Recycle Bin, if your resources are \n deleted, they are retained in the Recycle Bin for a time period that you specify.
\n \nYou can restore a resource from the Recycle Bin at any time before its retention period \n expires. After you restore a resource from the Recycle Bin, the resource is removed from the \n Recycle Bin, and you can then use it in the same way you use any other resource of that type \n in your account. If the retention period expires and the resource is not restored, the resource \n is permanently deleted from the Recycle Bin and is no longer available for recovery. For more \n information about Recycle Bin, see \n Recycle Bin in the Amazon Elastic Compute Cloud User Guide.
", "smithy.api#title": "Amazon Recycle Bin" }, "version": "2021-06-15", @@ -94,7 +94,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates a Recycle Bin retention rule. For more information, see \n Create Recycle Bin retention rules in the Amazon EC2 User Guide.
", + "smithy.api#documentation": "Creates a Recycle Bin retention rule. For more information, see \n Create Recycle Bin retention rules in the Amazon Elastic Compute Cloud User Guide.
", "smithy.api#http": { "method": "POST", "uri": "/rules", @@ -115,7 +115,7 @@ "Description": { "target": "com.amazonaws.rbin#Description", "traits": { - "smithy.api#documentation": "A brief description for the retention rule.
" + "smithy.api#documentation": "The retention rule description.
" } }, "Tags": { @@ -127,14 +127,14 @@ "ResourceType": { "target": "com.amazonaws.rbin#ResourceType", "traits": { - "smithy.api#documentation": "The resource type to be retained by the retention rule. Currently, only Amazon EBS snapshots are \n supported.
", + "smithy.api#documentation": "The resource type to be retained by the retention rule. Currently, only Amazon EBS snapshots \n and EBS-backed AMIs are supported. To retain snapshots, specify EBS_SNAPSHOT
. To \n retain EBS-backed AMIs, specify EC2_IMAGE
.
Information about the resource tags to use to identify resources that are to be retained \n by the retention rule. The retention rule retains only deleted snapshots that have one or more \n of the specified tag key and value pairs. If a snapshot is deleted, but it does not have \n any of the specified tag key and value pairs, it is immediately deleted without being retained \n by the retention rule.
\nYou can add the same tag key and value pair to a maximum or five retention rules.
" + "smithy.api#documentation": "Specifies the resource tags to use to identify resources that are to be retained by a \n tag-level retention rule. For tag-level retention rules, only deleted resources, of the specified resource type, that \n have one or more of the specified tag key and value pairs are retained. If a resource is deleted, but it does not have \n any of the specified tag key and value pairs, it is immediately deleted without being retained by the retention rule.
\nYou can add the same tag key and value pair to a maximum or five retention rules.
\nTo create a Region-level retention rule, omit this parameter. A Region-level retention rule \n does not have any resource tags specified. It retains all deleted resources of the specified \n resource type in the Region in which the rule is created, even if the resources are not tagged.
" } } } @@ -145,7 +145,7 @@ "Identifier": { "target": "com.amazonaws.rbin#RuleIdentifier", "traits": { - "smithy.api#documentation": "The unique identifier of the retention rule.
" + "smithy.api#documentation": "The unique ID of the retention rule.
" } }, "RetentionPeriod": { @@ -160,7 +160,7 @@ "Tags": { "target": "com.amazonaws.rbin#TagList", "traits": { - "smithy.api#documentation": "The tags assigned to the retention rule.
" + "smithy.api#documentation": "Information about the tags assigned to the retention rule.
" } }, "ResourceType": { @@ -178,7 +178,7 @@ "Status": { "target": "com.amazonaws.rbin#RuleStatus", "traits": { - "smithy.api#documentation": "The state of the retention rule. Only retention rules that are in the available
state retain snapshots.
The state of the retention rule. Only retention rules that are in the available
\n state retain resources.
Deletes a Recycle Bin retention rule. For more information, see \n Delete Recycle Bin retention rules in the Amazon EC2 User Guide.
", + "smithy.api#documentation": "Deletes a Recycle Bin retention rule. For more information, see \n Delete Recycle Bin retention rules in the Amazon Elastic Compute Cloud User Guide.
", "smithy.api#http": { "method": "DELETE", "uri": "/rules/{Identifier}", @@ -217,7 +217,7 @@ "Identifier": { "target": "com.amazonaws.rbin#RuleIdentifier", "traits": { - "smithy.api#documentation": "The unique ID of the retention rule to delete.
", + "smithy.api#documentation": "The unique ID of the retention rule.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -290,31 +290,31 @@ "Description": { "target": "com.amazonaws.rbin#Description", "traits": { - "smithy.api#documentation": "The description assigned to the retention rule.
" + "smithy.api#documentation": "The retention rule description.
" } }, "ResourceType": { "target": "com.amazonaws.rbin#ResourceType", "traits": { - "smithy.api#documentation": "The resource type retained by the retention rule. Currently, only Amazon EBS snapshots are supported.
" + "smithy.api#documentation": "The resource type retained by the retention rule.
" } }, "RetentionPeriod": { "target": "com.amazonaws.rbin#RetentionPeriod", "traits": { - "smithy.api#documentation": "Information about the period for which the retention rule retains resources.
" + "smithy.api#documentation": "Information about the retention period for which the retention rule is to retain resources.
" } }, "ResourceTags": { "target": "com.amazonaws.rbin#ResourceTags", "traits": { - "smithy.api#documentation": "The resource tags used to identify resources that are to be retained by the retention rule.
" + "smithy.api#documentation": "Information about the resource tags used to identify resources that are retained by the retention \n rule.
" } }, "Status": { "target": "com.amazonaws.rbin#RuleStatus", "traits": { - "smithy.api#documentation": "The state of the retention rule. Only retention rules that are in the available
state retain snapshots.
The state of the retention rule. Only retention rules that are in the available
\n state retain resources.
The maximum number of results to return for the request in a single page. The remaining results can be seen by sending another request with the returned nextToken
value. This value can be between 5 and 500. If maxResults
is given a larger value than 500, you receive an error.
The maximum number of results to return with a single call.\n\tTo retrieve the remaining results, make another call with the returned NextToken
value.
The token to use to retrieve the next page of results.
" + "smithy.api#documentation": "The token for the next page of results.
" } }, "ResourceType": { "target": "com.amazonaws.rbin#ResourceType", "traits": { - "smithy.api#documentation": "The resource type retained by the retention rule. Only retention rules that retain the specified resource type \n are listed.
", + "smithy.api#documentation": "The resource type retained by the retention rule. Only retention rules that retain \n the specified resource type are listed. Currently, only Amazon EBS snapshots and EBS-backed \n AMIs are supported. To list retention rules that retain snapshots, specify \n EBS_SNAPSHOT
. To list retention rules that retain EBS-backed AMIs, specify \n EC2_IMAGE
.
The tags used to identify resources that are to be retained by the retention rule.
" + "smithy.api#documentation": "Information about the resource tags used to identify resources that are retained by the retention \n rule.
" } } } @@ -429,7 +429,7 @@ } ], "traits": { - "smithy.api#documentation": "Lists the tags assigned a specific resource.
", + "smithy.api#documentation": "Lists the tags assigned to a retention rule.
", "smithy.api#http": { "method": "GET", "uri": "/tags/{ResourceArn}", @@ -443,7 +443,7 @@ "ResourceArn": { "target": "com.amazonaws.rbin#RuleArn", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the resource for which to list the tags.
", + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the retention rule.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -456,7 +456,7 @@ "Tags": { "target": "com.amazonaws.rbin#TagList", "traits": { - "smithy.api#documentation": "Information about the tags assigned to the resource.
" + "smithy.api#documentation": "Information about the tags assigned to the retention rule.
" } } } @@ -525,7 +525,7 @@ } }, "traits": { - "smithy.api#documentation": "Information about a resource tag used to identify resources that are to be retained by a Recycle Bin retention rule.
" + "smithy.api#documentation": "Information about the resource tags used to identify resources that are retained by the retention \n rule.
" } }, "com.amazonaws.rbin#ResourceTagKey": { @@ -559,6 +559,10 @@ { "value": "EBS_SNAPSHOT", "name": "EBS_SNAPSHOT" + }, + { + "value": "EC2_IMAGE", + "name": "EC2_IMAGE" } ] } @@ -582,7 +586,7 @@ } }, "traits": { - "smithy.api#documentation": "Information about the retention period for which a retention rule is to retain resources.
" + "smithy.api#documentation": "Information about the retention period for which the retention rule is to retain resources.
" } }, "com.amazonaws.rbin#RetentionPeriodUnit": { @@ -649,13 +653,13 @@ "Description": { "target": "com.amazonaws.rbin#Description", "traits": { - "smithy.api#documentation": "The description for the retention rule.
" + "smithy.api#documentation": "The retention rule description.
" } }, "RetentionPeriod": { "target": "com.amazonaws.rbin#RetentionPeriod", "traits": { - "smithy.api#documentation": "Information about the retention period for which the retention rule retains resources
" + "smithy.api#documentation": "Information about the retention period for which the retention rule is to retain resources.
" } } }, @@ -718,7 +722,7 @@ } }, "traits": { - "smithy.api#documentation": "Information about the tags assigned to a Recycle Bin retention rule.
" + "smithy.api#documentation": "Information about the tags to assign to the retention rule.
" } }, "com.amazonaws.rbin#TagKey": { @@ -778,7 +782,7 @@ } ], "traits": { - "smithy.api#documentation": "Assigns tags to the specified resource.
", + "smithy.api#documentation": "Assigns tags to the specified retention rule.
", "smithy.api#http": { "method": "POST", "uri": "/tags/{ResourceArn}", @@ -792,7 +796,7 @@ "ResourceArn": { "target": "com.amazonaws.rbin#RuleArn", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the resource to which to assign the tags.
", + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the retention rule.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -800,7 +804,7 @@ "Tags": { "target": "com.amazonaws.rbin#TagList", "traits": { - "smithy.api#documentation": "Information about the tags to assign to the resource.
", + "smithy.api#documentation": "Information about the tags to assign to the retention rule.
", "smithy.api#required": {} } } @@ -840,7 +844,7 @@ } ], "traits": { - "smithy.api#documentation": "Unassigns a tag from a resource.
", + "smithy.api#documentation": "Unassigns a tag from a retention rule.
", "smithy.api#http": { "method": "DELETE", "uri": "/tags/{ResourceArn}", @@ -854,7 +858,7 @@ "ResourceArn": { "target": "com.amazonaws.rbin#RuleArn", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the resource from which to unassign the tags.
", + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the retention rule.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -862,7 +866,7 @@ "TagKeys": { "target": "com.amazonaws.rbin#TagKeyList", "traits": { - "smithy.api#documentation": "Information about the tags to unassign from the resource.
", + "smithy.api#documentation": "The tag keys of the tags to unassign. All tags that have the specified tag key are unassigned.
", "smithy.api#httpQuery": "tagKeys", "smithy.api#required": {} } @@ -893,7 +897,7 @@ } ], "traits": { - "smithy.api#documentation": "Updates an existing Recycle Bin retention rule. For more information, see \n Update Recycle Bin retention rules in the Amazon EC2 User Guide.
", + "smithy.api#documentation": "Updates an existing Recycle Bin retention rule. For more information, see \n Update Recycle Bin retention rules in the Amazon Elastic Compute Cloud User Guide.
", "smithy.api#http": { "method": "PATCH", "uri": "/rules/{Identifier}", @@ -907,7 +911,7 @@ "Identifier": { "target": "com.amazonaws.rbin#RuleIdentifier", "traits": { - "smithy.api#documentation": "The unique ID of the retention rule to update.
", + "smithy.api#documentation": "The unique ID of the retention rule.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -927,13 +931,13 @@ "ResourceType": { "target": "com.amazonaws.rbin#ResourceType", "traits": { - "smithy.api#documentation": "The resource type to be retained by the retention rule. Currently, only Amazon EBS snapshots are supported.
" + "smithy.api#documentation": "The resource type to be retained by the retention rule. Currently, only Amazon EBS snapshots \n and EBS-backed AMIs are supported. To retain snapshots, specify EBS_SNAPSHOT
. To \n retain EBS-backed AMIs, specify EC2_IMAGE
.
Information about the resource tags to use to identify resources that are to be retained \n by the retention rule. The retention rule retains only deleted snapshots that have one or more \n of the specified tag key and value pairs. If a snapshot is deleted, but it does not have \n any of the specified tag key and value pairs, it is immediately deleted without being retained \n by the retention rule.
\nYou can add the same tag key and value pair to a maximum or five retention rules.
" + "smithy.api#documentation": "Specifies the resource tags to use to identify resources that are to be retained by a \n tag-level retention rule. For tag-level retention rules, only deleted resources, of the specified resource type, that \n have one or more of the specified tag key and value pairs are retained. If a resource is deleted, but it does not have \n any of the specified tag key and value pairs, it is immediately deleted without being retained by the retention rule.
\nYou can add the same tag key and value pair to a maximum or five retention rules.
\nTo create a Region-level retention rule, omit this parameter. A Region-level retention rule \n does not have any resource tags specified. It retains all deleted resources of the specified \n resource type in the Region in which the rule is created, even if the resources are not tagged.
" } } } @@ -971,7 +975,7 @@ "Status": { "target": "com.amazonaws.rbin#RuleStatus", "traits": { - "smithy.api#documentation": "The state of the retention rule. Only retention rules that are in the available
state retain snapshots.
The state of the retention rule. Only retention rules that are in the available
\n state retain resources.
Creates a custom DB engine version (CEV). A CEV is a binary volume snapshot of a database engine and specific\n AMI. The only supported engine is Oracle Database 19c Enterprise Edition with the January 2021 or later\n RU/RUR.
\nAmazon RDS, which is a fully managed service, supplies the Amazon Machine Image (AMI) and database software.\n The Amazon RDS database software is preinstalled, so you need only select a DB engine and version, and create\n your database. With Amazon RDS Custom for Oracle, you upload your database installation files in Amazon S3.
\nWhen you create a custom engine version, you specify the files in a JSON document called a CEV manifest. \n This document describes installation .zip files stored in Amazon S3. RDS Custom creates your CEV from \n the installation files that you provided. This service model is called Bring Your Own Media (BYOM).
\nCreation takes approximately two hours. If creation fails, RDS Custom issues RDS-EVENT-0196
with \n the message Creation failed for custom engine version
, and includes details about the failure. \n For example, the event prints missing files.
After you create the CEV, it is available for use. You can create multiple CEVs, and create multiple \n RDS Custom instances from any CEV. You can also change the status of a CEV to make it available or\n inactive.
\nThe MediaImport service that imports files from Amazon S3 to create CEVs isn't integrated with \n Amazon Web Services CloudTrail. If you turn on data logging for Amazon RDS in CloudTrail, calls to the \n CreateCustomDbEngineVersion
event aren't logged. However, you might see calls from the \n API gateway that accesses your Amazon S3 bucket. These calls originate from the MediaImport service for \n the CreateCustomDbEngineVersion
event.
For more information, see \n Creating a CEV in the Amazon RDS User Guide.
" + "smithy.api#documentation": "Creates a custom DB engine version (CEV). A CEV is a binary volume snapshot of a database engine and specific\n AMI. The supported engines are the following:
\nOracle Database 12.1 Enterprise Edition with the January 2021 or later RU/RUR
\nOracle Database 19c Enterprise Edition with the January 2021 or later RU/RUR
\nAmazon RDS, which is a fully managed service, supplies the Amazon Machine Image (AMI) and database software.\n The Amazon RDS database software is preinstalled, so you need only select a DB engine and version, and create\n your database. With Amazon RDS Custom for Oracle, you upload your database installation files in Amazon S3.
\nWhen you create a custom engine version, you specify the files in a JSON document called a CEV manifest. \n This document describes installation .zip files stored in Amazon S3. RDS Custom creates your CEV from \n the installation files that you provided. This service model is called Bring Your Own Media (BYOM).
\nCreation takes approximately two hours. If creation fails, RDS Custom issues RDS-EVENT-0196
with \n the message Creation failed for custom engine version
, and includes details about the failure. \n For example, the event prints missing files.
After you create the CEV, it is available for use. You can create multiple CEVs, and create multiple \n RDS Custom instances from any CEV. You can also change the status of a CEV to make it available or\n inactive.
\nThe MediaImport service that imports files from Amazon S3 to create CEVs isn't integrated with \n Amazon Web Services CloudTrail. If you turn on data logging for Amazon RDS in CloudTrail, calls to the \n CreateCustomDbEngineVersion
event aren't logged. However, you might see calls from the \n API gateway that accesses your Amazon S3 bucket. These calls originate from the MediaImport service for \n the CreateCustomDbEngineVersion
event.
For more information, see \n Creating a CEV in the Amazon RDS User Guide.
" } }, "com.amazonaws.rds#CreateCustomDBEngineVersionMessage": { @@ -2151,7 +2151,7 @@ "DBSubnetGroupName": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "A DB subnet group to associate with this DB cluster.
\nThis setting is required to create a Multi-AZ DB cluster.
\nConstraints: Must match the name of an existing DBSubnetGroup. Must not be default.
\nExample: mySubnetgroup
\n
Valid for: Aurora DB clusters and Multi-AZ DB clusters
" + "smithy.api#documentation": "A DB subnet group to associate with this DB cluster.
\nThis setting is required to create a Multi-AZ DB cluster.
\nConstraints: Must match the name of an existing DBSubnetGroup. Must not be default.
\nExample: mydbsubnetgroup
\n
Valid for: Aurora DB clusters and Multi-AZ DB clusters
" } }, "Engine": { @@ -2652,7 +2652,7 @@ "DBSubnetGroupName": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "A DB subnet group to associate with this DB instance.
\nIf there is no DB subnet group, then it is a non-VPC DB instance.
" + "smithy.api#documentation": "A DB subnet group to associate with this DB instance.
\nConstraints: Must match the name of an existing DBSubnetGroup. Must not be default.
\nExample: mydbsubnetgroup
\n
Specifies a DB subnet group for the DB instance. The new DB instance is created in the VPC associated with the DB subnet group. If no DB subnet group is specified, then the new DB instance isn't created in a VPC.
\nConstraints:
\nCan only be specified if the source DB instance identifier specifies a DB instance in another Amazon Web Services Region.
\nIf supplied, must match the name of an existing DBSubnetGroup.
\nThe specified DB subnet group must be in the same Amazon Web Services Region in which the operation is running.
\nAll read replicas in one Amazon Web Services Region that are created from the same source DB\n instance must either:>
\nSpecify DB subnet groups from the same VPC. All these read replicas are created in the same\n VPC.
\nNot specify a DB subnet group. All these read replicas are created outside of any\n VPC.
\nExample: mySubnetgroup
\n
Specifies a DB subnet group for the DB instance. The new DB instance is created in the VPC associated with the DB subnet group. If no DB subnet group is specified, then the new DB instance isn't created in a VPC.
\nConstraints:
\nCan only be specified if the source DB instance identifier specifies a DB instance in another Amazon Web Services Region.
\nIf supplied, must match the name of an existing DBSubnetGroup.
\nThe specified DB subnet group must be in the same Amazon Web Services Region in which the operation is running.
\nAll read replicas in one Amazon Web Services Region that are created from the same source DB\n instance must either:>
\nSpecify DB subnet groups from the same VPC. All these read replicas are created in the same\n VPC.
\nNot specify a DB subnet group. All these read replicas are created outside of any\n VPC.
\nExample: mydbsubnetgroup
\n
The name for the DB subnet group. This value is stored as a lowercase string.
\nConstraints: Must contain no more than 255 letters, numbers, periods, underscores, spaces, or hyphens. Must not be default.
\nExample: mySubnetgroup
\n
The name for the DB subnet group. This value is stored as a lowercase string.
\nConstraints:
\nMust contain no more than 255 letters, numbers, periods, underscores, spaces, or hyphens.
\nMust not be default.
\nFirst character must be a letter.
\nExample: mydbsubnetgroup
\n
The name of the database subnet group to delete.
\nYou can't delete the default subnet group.
\nConstraints:
\nConstraints: Must match the name of an existing DBSubnetGroup. Must not be default.
\nExample: mySubnetgroup
\n
The name of the database subnet group to delete.
\nYou can't delete the default subnet group.
\nConstraints: Must match the name of an existing DBSubnetGroup. Must not be default.
\nExample: mydbsubnetgroup
\n
The new DB subnet group for the DB instance.\n You can use this parameter to move your DB instance to a different VPC.\n \n If your DB instance isn't in a VPC, you can also use this parameter to move your DB instance into a VPC.\n For more information, see \n Working with a DB instance in a VPC \n in the Amazon RDS User Guide.\n
\nChanging the subnet group causes an outage during the change. \n The change is applied during the next maintenance window,\n unless you enable ApplyImmediately
.
This parameter doesn't apply to RDS Custom.
\nConstraints: If supplied, must match the name of an existing DBSubnetGroup.
\nExample: mySubnetGroup
\n
The new DB subnet group for the DB instance.\n You can use this parameter to move your DB instance to a different VPC.\n \n If your DB instance isn't in a VPC, you can also use this parameter to move your DB instance into a VPC.\n For more information, see \n Working with a DB instance in a VPC \n in the Amazon RDS User Guide.\n
\nChanging the subnet group causes an outage during the change. \n The change is applied during the next maintenance window,\n unless you enable ApplyImmediately
.
This parameter doesn't apply to RDS Custom.
\nConstraints: If supplied, must match the name of an existing DBSubnetGroup.
\nExample: mydbsubnetgroup
\n
The name for the DB subnet group. This value is stored as a lowercase string.\n You can't modify the default subnet group.\n
\nConstraints: Must match the name of an existing DBSubnetGroup. Must not be default.
\nExample: mySubnetgroup
\n
The name for the DB subnet group. This value is stored as a lowercase string.\n You can't modify the default subnet group.\n
\nConstraints: Must match the name of an existing DBSubnetGroup. Must not be default.
\nExample: mydbsubnetgroup
\n
A DB subnet group to associate with the restored DB cluster.
\nConstraints: If supplied, must match the name of an existing DBSubnetGroup.\n
\nExample: mySubnetgroup
\n
A DB subnet group to associate with the restored DB cluster.
\nConstraints: If supplied, must match the name of an existing DBSubnetGroup.
\nExample: mydbsubnetgroup
\n
The name of the DB subnet group to use for the new DB cluster.
\nConstraints: If supplied, must match the name of an existing DB subnet group.
\nExample: mySubnetgroup
\n
Valid for: Aurora DB clusters and Multi-AZ DB clusters
" + "smithy.api#documentation": "The name of the DB subnet group to use for the new DB cluster.
\nConstraints: If supplied, must match the name of an existing DB subnet group.
\nExample: mydbsubnetgroup
\n
Valid for: Aurora DB clusters and Multi-AZ DB clusters
" } }, "DatabaseName": { @@ -18131,7 +18131,7 @@ "DBSubnetGroupName": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "The DB subnet group name to use for the new DB cluster.
\nConstraints: If supplied, must match the name of an existing DBSubnetGroup.
\nExample: mySubnetgroup
\n
Valid for: Aurora DB clusters and Multi-AZ DB clusters
" + "smithy.api#documentation": "The DB subnet group name to use for the new DB cluster.
\nConstraints: If supplied, must match the name of an existing DBSubnetGroup.
\nExample: mydbsubnetgroup
\n
Valid for: Aurora DB clusters and Multi-AZ DB clusters
" } }, "OptionGroupName": { @@ -18364,7 +18364,7 @@ "DBSubnetGroupName": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "The DB subnet group name to use for the new instance.
\nConstraints: If supplied, must match the name of an existing DBSubnetGroup.
\nExample: mySubnetgroup
\n
The DB subnet group name to use for the new instance.
\nConstraints: If supplied, must match the name of an existing DBSubnetGroup.
\nExample: mydbsubnetgroup
\n
A value that indicates whether to copy all tags from the restored DB instance to snapshots of the DB instance. By default, tags are not copied.
" + "smithy.api#documentation": "A value that indicates whether to copy all tags from the restored DB instance to snapshots of the DB instance.
\nIn most cases, tags aren't copied by default. However, when you restore a DB instance from a DB snapshot, RDS checks whether you \n specify new tags. If yes, the new tags are added to the restored DB instance. If there are no new tags, RDS looks for the tags from\n the source DB instance for the DB snapshot, and then adds those tags to the restored DB instance.
\nFor more information, see \n Copying tags to DB instance snapshots in the Amazon RDS User Guide.
" } }, "DomainIAMRoleName": { @@ -18505,7 +18505,7 @@ "CustomIamInstanceProfile": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "The instance profile associated with the underlying Amazon EC2 instance of an \n RDS Custom DB instance. The instance profile must meet the following requirements:
\nThe profile must exist in your account.
\nThe profile must have an IAM role that Amazon EC2 has permissions to assume.
\nThe instance profile name and the associated IAM role name must start with the prefix AWSRDSCustom
.
For the list of permissions required for the IAM role, see \n \n Configure IAM and your VPC in the Amazon Relational Database Service\n User Guide.
\nThis setting is required for RDS Custom.
" + "smithy.api#documentation": "The instance profile associated with the underlying Amazon EC2 instance of an \n RDS Custom DB instance. The instance profile must meet the following requirements:
\nThe profile must exist in your account.
\nThe profile must have an IAM role that Amazon EC2 has permissions to assume.
\nThe instance profile name and the associated IAM role name must start with the prefix AWSRDSCustom
.
For the list of permissions required for the IAM role, see \n \n Configure IAM and your VPC in the Amazon RDS User Guide.
\nThis setting is required for RDS Custom.
" } }, "BackupTarget": { @@ -18661,7 +18661,7 @@ "DBSubnetGroupName": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "A DB subnet group to associate with this DB instance.
" + "smithy.api#documentation": "A DB subnet group to associate with this DB instance.
\nConstraints: If supplied, must match the name of an existing DBSubnetGroup.
\nExample: mydbsubnetgroup
\n
The DB subnet group name to use for the new instance.
\nConstraints: If supplied, must match the name of an existing DBSubnetGroup.
\nExample: mySubnetgroup
\n
The DB subnet group name to use for the new instance.
\nConstraints: If supplied, must match the name of an existing DBSubnetGroup.
\nExample: mydbsubnetgroup
\n
SNS has responded that there is a problem with the SND topic specified.
", + "smithy.api#documentation": "SNS has responded that there is a problem with the SNS topic specified.
", "smithy.api#error": "client", "smithy.api#httpError": 400 } diff --git a/codegen/sdk-codegen/aws-models/robomaker.json b/codegen/sdk-codegen/aws-models/robomaker.json index 2d2c073576418..66701a5215130 100644 --- a/codegen/sdk-codegen/aws-models/robomaker.json +++ b/codegen/sdk-codegen/aws-models/robomaker.json @@ -239,7 +239,10 @@ } ], "traits": { - "smithy.api#documentation": "Cancels the specified deployment job.
", + "smithy.api#deprecated": { + "message": "Support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html." + }, + "smithy.api#documentation": "Cancels the specified deployment job.
\nThis API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service.
\nDeploys a specific version of a robot application to robots in a fleet.
\nThe robot application must have a numbered applicationVersion
for\n consistency reasons. To create a new version, use\n CreateRobotApplicationVersion
or see Creating a Robot Application Version.
After 90 days, deployment jobs expire and will be deleted. They will no longer be\n accessible.
\nDeploys a specific version of a robot application to robots in a fleet.
\nThis API is no longer supported and will throw an error if used.
\nThe robot application must have a numbered applicationVersion
for\n consistency reasons. To create a new version, use\n CreateRobotApplicationVersion
or see Creating a Robot Application Version.
After 90 days, deployment jobs expire and will be deleted. They will no longer be\n accessible.
\nA map that contains tag keys and tag values that are attached to the deployment\n job.
" } } + }, + "traits": { + "smithy.api#deprecated": { + "message": "AWS RoboMaker is unable to process this request as the support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html." + } } }, "com.amazonaws.robomaker#CreateDeploymentJobResponse": { @@ -693,6 +714,11 @@ "smithy.api#documentation": "The list of all tags added to the deployment job.
" } } + }, + "traits": { + "smithy.api#deprecated": { + "message": "AWS RoboMaker is unable to process this request as the support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html." + } } }, "com.amazonaws.robomaker#CreateFleet": { @@ -718,7 +744,10 @@ } ], "traits": { - "smithy.api#documentation": "Creates a fleet, a logical group of robots running the same robot application.
", + "smithy.api#deprecated": { + "message": "AWS RoboMaker is unable to process this request as the support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html." + }, + "smithy.api#documentation": "Creates a fleet, a logical group of robots running the same robot application.
\nThis API is no longer supported and will throw an error if used.
\nA map that contains tag keys and tag values that are attached to the fleet.
" } } + }, + "traits": { + "smithy.api#deprecated": { + "message": "AWS RoboMaker is unable to process this request as the support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html." + } } }, "com.amazonaws.robomaker#CreateFleetResponse": { @@ -771,6 +805,11 @@ "smithy.api#documentation": "The list of all tags added to the fleet.
" } } + }, + "traits": { + "smithy.api#deprecated": { + "message": "AWS RoboMaker is unable to process this request as the support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html." + } } }, "com.amazonaws.robomaker#CreateRobot": { @@ -799,7 +838,10 @@ } ], "traits": { - "smithy.api#documentation": "Creates a robot.
", + "smithy.api#deprecated": { + "message": "AWS RoboMaker is unable to process this request as the support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html." + }, + "smithy.api#documentation": "Creates a robot.
\nThis API is no longer supported and will throw an error if used.
\nA map that contains tag keys and tag values that are attached to the robot.
" } } + }, + "traits": { + "smithy.api#deprecated": { + "message": "AWS RoboMaker is unable to process this request as the support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html." + } } }, "com.amazonaws.robomaker#CreateRobotResponse": { @@ -1128,6 +1175,11 @@ "smithy.api#documentation": "The list of all tags added to the robot.
" } } + }, + "traits": { + "smithy.api#deprecated": { + "message": "AWS RoboMaker is unable to process this request as the support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html." + } } }, "com.amazonaws.robomaker#CreateSimulationApplication": { @@ -2176,7 +2228,10 @@ } ], "traits": { - "smithy.api#documentation": "Deletes a fleet.
", + "smithy.api#deprecated": { + "message": "Support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html." + }, + "smithy.api#documentation": "Deletes a fleet.
\nThis API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service.
\nDeletes a robot.
", + "smithy.api#deprecated": { + "message": "Support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html." + }, + "smithy.api#documentation": "Deletes a robot.
\nThis API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service.
\nDeregisters a robot.
", + "smithy.api#deprecated": { + "message": "Support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html." + }, + "smithy.api#documentation": "Deregisters a robot.
\nThis API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service.
\nThe Amazon Resource Name (ARN) of the robot.
" } } + }, + "traits": { + "smithy.api#deprecated": { + "message": "Support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html." + } } }, "com.amazonaws.robomaker#DescribeDeploymentJob": { @@ -2812,7 +2903,10 @@ } ], "traits": { - "smithy.api#documentation": "Describes a deployment job.
", + "smithy.api#deprecated": { + "message": "Support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html." + }, + "smithy.api#documentation": "Describes a deployment job.
\nThis API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service.
\nThe list of all tags added to the specified deployment job.
" } } + }, + "traits": { + "smithy.api#deprecated": { + "message": "Support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html." + } } }, "com.amazonaws.robomaker#DescribeFleet": { @@ -2920,7 +3024,10 @@ } ], "traits": { - "smithy.api#documentation": "Describes a fleet.
", + "smithy.api#deprecated": { + "message": "Support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html." + }, + "smithy.api#documentation": "Describes a fleet.
\nThis API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service.
\nThe list of all tags added to the specified fleet.
" } } + }, + "traits": { + "smithy.api#deprecated": { + "message": "Support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html." + } } }, "com.amazonaws.robomaker#DescribeRobot": { @@ -3016,7 +3133,10 @@ } ], "traits": { - "smithy.api#documentation": "Describes a robot.
", + "smithy.api#deprecated": { + "message": "Support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html." + }, + "smithy.api#documentation": "Describes a robot.
\nThis API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service.
\nThe list of all tags added to the specified robot.
" } } + }, + "traits": { + "smithy.api#deprecated": { + "message": "Support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html." + } } }, "com.amazonaws.robomaker#DescribeSimulationApplication": { @@ -4559,7 +4689,10 @@ } ], "traits": { - "smithy.api#documentation": "Returns a list of deployment jobs for a fleet. You can optionally provide filters to\n retrieve specific deployment jobs.
", + "smithy.api#deprecated": { + "message": "Support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html." + }, + "smithy.api#documentation": "Returns a list of deployment jobs for a fleet. You can optionally provide filters to retrieve specific deployment jobs.
\nThis API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service.
\nWhen this parameter is used, ListDeploymentJobs
only returns\n maxResults
results in a single page along with a nextToken
\n response element. The remaining results of the initial request can be seen by sending\n another ListDeploymentJobs
request with the returned nextToken
\n value. This value can be between 1 and 200. If this parameter is not used, then\n ListDeploymentJobs
returns up to 200 results and a nextToken
\n value if applicable.
If the previous paginated request did not return all of the remaining results, the\n response object's nextToken
parameter value is set to a token. To retrieve the\n next set of results, call ListDeploymentJobs
again and assign that token to\n the request object's nextToken
parameter. If there are no remaining results,\n the previous response object's NextToken parameter is set to null.
Returns a list of fleets. You can optionally provide filters to retrieve specific\n fleets.
", + "smithy.api#deprecated": { + "message": "Support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html." + }, + "smithy.api#documentation": "Returns a list of fleets. You can optionally provide filters to retrieve specific fleets.
\nThis API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service.
\nOptional filters to limit results.
\nThe filter name name
is supported. When filtering, you must use the\n complete value of the filtered item. You can use up to three filters.
If the previous paginated request did not return all of the remaining results, the\n response object's nextToken
parameter value is set to a token. To retrieve the\n next set of results, call ListFleets
again and assign that token to the\n request object's nextToken
parameter. If there are no remaining results, the\n previous response object's NextToken parameter is set to null.
Returns a list of robots. You can optionally provide filters to retrieve specific\n robots.
", + "smithy.api#deprecated": { + "message": "Support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html." + }, + "smithy.api#documentation": "Returns a list of robots. You can optionally provide filters to retrieve specific robots.
\nThis API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service.
\nOptional filters to limit results.
\nThe filter names status
and fleetName
are supported. When\n filtering, you must use the complete value of the filtered item. You can use up to three\n filters, but they must be for the same named item. For example, if you are looking for\n items with the status Registered
or the status Available
.
If the previous paginated request did not return all of the remaining results, the\n response object's nextToken
parameter value is set to a token. To retrieve the\n next set of results, call ListRobots
again and assign that token to the\n request object's nextToken
parameter. If there are no remaining results, the\n previous response object's NextToken parameter is set to null.
Registers a robot with a fleet.
", + "smithy.api#deprecated": { + "message": "AWS RoboMaker is unable to process this request as the support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html." + }, + "smithy.api#documentation": "Registers a robot with a fleet.
\nThis API is no longer supported and will throw an error if used.
\nInformation about the robot registration.
" } } + }, + "traits": { + "smithy.api#deprecated": { + "message": "AWS RoboMaker is unable to process this request as the support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html." + } } }, "com.amazonaws.robomaker#RenderingEngine": { @@ -7377,7 +7559,10 @@ } ], "traits": { - "smithy.api#documentation": "Syncrhonizes robots in a fleet to the latest deployment. This is helpful if robots were\n added after a deployment.
", + "smithy.api#deprecated": { + "message": "Support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html." + }, + "smithy.api#documentation": "Syncrhonizes robots in a fleet to the latest deployment. This is helpful if robots were added after a deployment.
\nThis API will no longer be supported as of May 2, 2022. Use it to remove resources that were created for Deployment Service.
\nThe time, in milliseconds since the epoch, when the fleet was created.
" } } + }, + "traits": { + "smithy.api#deprecated": { + "message": "Support for the AWS RoboMaker application deployment feature has ended. For additional information, see https://docs.aws.amazon.com/robomaker/latest/dg/fleets.html." + } } }, "com.amazonaws.robomaker#TagKey": { @@ -8620,6 +8815,21 @@ }, "com.amazonaws.robomaker#robomaker": { "type": "service", + "traits": { + "aws.api#service": { + "sdkId": "RoboMaker", + "arnNamespace": "robomaker", + "cloudFormationName": "RoboMaker", + "cloudTrailEventSource": "robomaker.amazonaws.com", + "endpointPrefix": "robomaker" + }, + "aws.auth#sigv4": { + "name": "robomaker" + }, + "aws.protocols#restJson1": {}, + "smithy.api#documentation": "This section provides documentation for the AWS RoboMaker API operations.
", + "smithy.api#title": "AWS RoboMaker" + }, "version": "2018-06-29", "operations": [ { @@ -8793,22 +9003,7 @@ { "target": "com.amazonaws.robomaker#UpdateWorldTemplate" } - ], - "traits": { - "aws.api#service": { - "sdkId": "RoboMaker", - "arnNamespace": "robomaker", - "cloudFormationName": "RoboMaker", - "cloudTrailEventSource": "robomaker.amazonaws.com", - "endpointPrefix": "robomaker" - }, - "aws.auth#sigv4": { - "name": "robomaker" - }, - "aws.protocols#restJson1": {}, - "smithy.api#documentation": "This section provides documentation for the AWS RoboMaker API operations.
", - "smithy.api#title": "AWS RoboMaker" - } + ] } } } diff --git a/codegen/smithy-aws-typescript-codegen/src/main/resources/software/amazon/smithy/aws/typescript/codegen/endpoints.json b/codegen/smithy-aws-typescript-codegen/src/main/resources/software/amazon/smithy/aws/typescript/codegen/endpoints.json index d75912f2426fa..4ecaa8b70f5fd 100644 --- a/codegen/smithy-aws-typescript-codegen/src/main/resources/software/amazon/smithy/aws/typescript/codegen/endpoints.json +++ b/codegen/smithy-aws-typescript-codegen/src/main/resources/software/amazon/smithy/aws/typescript/codegen/endpoints.json @@ -2873,6 +2873,7 @@ }, "connect": { "endpoints": { + "af-south-1": {}, "ap-northeast-1": {}, "ap-northeast-2": {}, "ap-southeast-1": {}, @@ -12883,6 +12884,32 @@ } } }, + "synthetics": { + "endpoints": { + "af-south-1": {}, + "ap-east-1": {}, + "ap-northeast-1": {}, + "ap-northeast-2": {}, + "ap-northeast-3": {}, + "ap-south-1": {}, + "ap-southeast-1": {}, + "ap-southeast-2": {}, + "ap-southeast-3": {}, + "ca-central-1": {}, + "eu-central-1": {}, + "eu-north-1": {}, + "eu-south-1": {}, + "eu-west-1": {}, + "eu-west-2": {}, + "eu-west-3": {}, + "me-south-1": {}, + "sa-east-1": {}, + "us-east-1": {}, + "us-east-2": {}, + "us-west-1": {}, + "us-west-2": {} + } + }, "tagging": { "endpoints": { "af-south-1": {}, @@ -14222,6 +14249,12 @@ "cn-northwest-1": {} } }, + "codepipeline": { + "endpoints": { + "cn-north-1": {}, + "cn-northwest-1": {} + } + }, "cognito-identity": { "endpoints": { "cn-north-1": {} @@ -14957,6 +14990,12 @@ "cn-northwest-1": {} } }, + "synthetics": { + "endpoints": { + "cn-north-1": {}, + "cn-northwest-1": {} + } + }, "tagging": { "endpoints": { "cn-north-1": {}, @@ -18538,6 +18577,12 @@ } } }, + "synthetics": { + "endpoints": { + "us-gov-east-1": {}, + "us-gov-west-1": {} + } + }, "tagging": { "endpoints": { "us-gov-east-1": {}, @@ -19265,6 +19310,11 @@ "us-iso-west-1": {} } }, + "synthetics": { + "endpoints": { + "us-iso-east-1": {} + } + }, "transcribe": { 
"defaults": { "protocols": ["https"] @@ -19639,6 +19689,11 @@ "us-isob-east-1": {} } }, + "synthetics": { + "endpoints": { + "us-isob-east-1": {} + } + }, "tagging": { "endpoints": { "us-isob-east-1": {}