From 8fa517a2c56d2f98a2e4a9c4ea6fd99b6ce61a71 Mon Sep 17 00:00:00 2001
From: AllanZhengYP The basic authorization credentials for the autocreated branch. You must base64-encode the authorization credentials and provide them in the format The basic authorization credentials for the autocreated branch. You must
+ * base64-encode the authorization credentials and provide them in the format
+ * Enables performance mode for the branch. Performance mode optimizes for faster hosting performance by keeping content cached at the edge for a longer interval. When performance mode is enabled, hosting configuration or code changes can take up to 10 minutes to roll out. Performance mode optimizes for faster hosting performance by keeping content cached at
+ * the edge for a longer interval. When performance mode is enabled, hosting configuration
+ * or code changes can take up to 10 minutes to roll out. Represents a 301 (moved pemanently) redirect rule. This and all future requests should be directed to the target URL. Represents a 301 (moved pemanently) redirect rule. This and all future
+ * requests should be directed to the target URL. The credentials for basic authorization for an Amplify app. You must base64-encode the authorization credentials and provide them in the format The credentials for basic authorization for an Amplify app. You must base64-encode
+ * the authorization credentials and provide them in the format
+ * Represents the different branches of a repository for building, deploying, and
* hosting an Amplify app. The repository for the Amplify app. The Git repository for the Amplify app. The basic authorization credentials for branches for the Amplify app. You must base64-encode the authorization credentials and provide them in the format The basic authorization credentials for branches for the Amplify app. You must
+ * base64-encode the authorization credentials and provide them in the format
+ * Describes the automated branch creation configuration for the Amplify app. The authentication protocol to use to access the Git repository for an Amplify app.
+ * For a GitHub repository, specify The basic authorization credentials for the branch. You must base64-encode the authorization credentials and provide them in the format The basic authorization credentials for the branch. You must base64-encode the
+ * authorization credentials and provide them in the format
+ * Enables performance mode for the branch. Performance mode optimizes for faster hosting performance by keeping content cached at the edge for a longer interval. When performance mode is enabled, hosting configuration or code changes can take up to 10 minutes to roll out. Performance mode optimizes for faster hosting performance by keeping content cached at
+ * the edge for a longer interval. When performance mode is enabled, hosting configuration
+ * or code changes can take up to 10 minutes to roll out. Enables performance mode for the branch. Performance mode optimizes for faster hosting performance by keeping content cached at the edge for a longer interval. When performance mode is enabled, hosting configuration or code changes can take up to 10 minutes to roll out. Performance mode optimizes for faster hosting performance by keeping content cached at
+ * the edge for a longer interval. When performance mode is enabled, hosting configuration
+ * or code changes can take up to 10 minutes to roll out. The basic authorization credentials for a branch of an Amplify app. You must base64-encode the authorization credentials and provide them in the format The basic authorization credentials for a branch of an Amplify app. You must
+ * base64-encode the authorization credentials and provide them in the format
+ * The basic authorization credentials for an Amplify app. You must base64-encode the authorization credentials and provide them in the format The basic authorization credentials for an Amplify app. You must base64-encode the
+ * authorization credentials and provide them in the format
+ * The basic authorization credentials for the branch. You must base64-encode the authorization credentials and provide them in the format The basic authorization credentials for the branch. You must base64-encode the
+ * authorization credentials and provide them in the format
+ * Enables performance mode for the branch. Performance mode optimizes for faster hosting performance by keeping content cached at the edge for a longer interval. When performance mode is enabled, hosting configuration or code changes can take up to 10 minutes to roll out. Performance mode optimizes for faster hosting performance by keeping content cached at
+ * the edge for a longer interval. When performance mode is enabled, hosting configuration
+ * or code changes can take up to 10 minutes to roll out. Describes the settings for the subdomain. Sets the branch patterns for automatic subdomain creation. The Amplify UI Builder API provides a programmatic interface for creating and configuring
-user interface (UI) component libraries and themes for use in your Amplify applications. You
-can then connect these UI components to an application's backend Amazon Web Services
-resources. You can also use the Amplify Studio visual designer to create UI components and model data
-for an app. For more information, see Introduction in the
+ The Amplify UI Builder API provides a programmatic interface for creating
+and configuring user interface (UI) component libraries and themes for use in your Amplify applications. You can then connect these UI components to an application's
+backend Amazon Web Services resources. You can also use the Amplify Studio visual designer to create UI components
+and model data for an app. For more information, see Introduction in the
Amplify Docs. The Amplify Framework is a comprehensive set of SDKs, libraries, tools, and documentation
-for client app development. For more information, see the Amplify Framework. For more information about
-deploying an Amplify application to Amazon Web Services, see the Amplify Console User Guide. The Amplify Framework is a comprehensive set of SDKs, libraries, tools, and
+documentation for client app development. For more information, see the Amplify Framework. For more information about
+deploying an Amplify application to Amazon Web Services, see the Amplify User Guide. The Amplify UI Builder API provides a programmatic interface for creating and configuring
- * user interface (UI) component libraries and themes for use in your Amplify applications. You
- * can then connect these UI components to an application's backend Amazon Web Services
- * resources. You can also use the Amplify Studio visual designer to create UI components and model data
- * for an app. For more information, see Introduction in the
+ * The Amplify UI Builder API provides a programmatic interface for creating
+ * and configuring user interface (UI) component libraries and themes for use in your Amplify applications. You can then connect these UI components to an application's
+ * backend Amazon Web Services resources. You can also use the Amplify Studio visual designer to create UI components
+ * and model data for an app. For more information, see Introduction in the
* Amplify Docs. The Amplify Framework is a comprehensive set of SDKs, libraries, tools, and documentation
- * for client app development. For more information, see the Amplify Framework. For more information about
- * deploying an Amplify application to Amazon Web Services, see the Amplify Console User Guide. The Amplify Framework is a comprehensive set of SDKs, libraries, tools, and
+ * documentation for client app development. For more information, see the Amplify Framework. For more information about
+ * deploying an Amplify application to Amazon Web Services, see the Amplify User Guide. Exports component configurations to code that is ready to integrate into an Amplify
- * app. Exports component configurations to code that is ready to integrate into an Amplify app. Exports theme configurations to code that is ready to integrate into an Amplify
- * app. Exports theme configurations to code that is ready to integrate into an Amplify app. Retrieves a list of components for a specified Amplify app and backend environment. Retrieves a list of components for a specified Amplify app and backend
+ * environment. Retrieves a list of themes for a specified Amplify app and backend environment. Retrieves a list of themes for a specified Amplify app and backend
+ * environment. The Amplify UI Builder API provides a programmatic interface for creating and configuring
- * user interface (UI) component libraries and themes for use in your Amplify applications. You
- * can then connect these UI components to an application's backend Amazon Web Services
- * resources. You can also use the Amplify Studio visual designer to create UI components and model data
- * for an app. For more information, see Introduction in the
+ * The Amplify UI Builder API provides a programmatic interface for creating
+ * and configuring user interface (UI) component libraries and themes for use in your Amplify applications. You can then connect these UI components to an application's
+ * backend Amazon Web Services resources. You can also use the Amplify Studio visual designer to create UI components
+ * and model data for an app. For more information, see Introduction in the
* Amplify Docs. The Amplify Framework is a comprehensive set of SDKs, libraries, tools, and documentation
- * for client app development. For more information, see the Amplify Framework. For more information about
- * deploying an Amplify application to Amazon Web Services, see the Amplify Console User Guide. The Amplify Framework is a comprehensive set of SDKs, libraries, tools, and
+ * documentation for client app development. For more information, see the Amplify Framework. For more information about
+ * deploying an Amplify application to Amazon Web Services, see the Amplify User Guide. Exports component configurations to code that is ready to integrate into an Amplify
- * app. Exports component configurations to code that is ready to integrate into an Amplify app. Exports theme configurations to code that is ready to integrate into an Amplify
- * app. Exports theme configurations to code that is ready to integrate into an Amplify app. Retrieves a list of components for a specified Amplify app and backend environment. Retrieves a list of components for a specified Amplify app and backend
+ * environment. Retrieves a list of themes for a specified Amplify app and backend environment. Retrieves a list of themes for a specified Amplify app and backend
+ * environment. The combination of variants that comprise this variant. The combination of variants that comprise this variant. You can't specify
+ * The properties of the component variant that can be overriden when customizing an instance
- * of the component.user:password
.user:password
.user:password
.user:password
.user:password
.user:password
.TOKEN
. For an Amazon Web Services CodeCommit repository,
+ * specify SIGV4
. For GitLab and Bitbucket repositories, specify
+ * SSH
.user:password
.user:password
.user:password
.user:password
.user:password
.user:password
.user:password
.user:password
.tags
as a valid property for variantValues
.tags
as a valid property for
+ * overrides
.
The unique ID of the Amplify app associated with the component to delete.
+ *The unique ID of the Amplify app associated with the component to + * delete.
*/ appId: string | undefined; /** - *The name of the backend environment that is a part of the Amplify app.
+ *The name of the backend environment that is a part of the Amplify + * app.
*/ environmentName: string | undefined; @@ -232,9 +236,15 @@ export interface ExportComponentsRequest { appId: string | undefined; /** - *The name of the backend environment that is a part of the Amplify app.
+ *The name of the backend environment that is a part of the Amplify + * app.
*/ environmentName: string | undefined; + + /** + *The token to request the next page of results.
+ */ + nextToken?: string; } export namespace ExportComponentsRequest { @@ -279,7 +289,8 @@ export interface ListComponentsRequest { appId: string | undefined; /** - *The name of the backend environment that is a part of the Amplify app.
+ *The name of the backend environment that is a part of the Amplify + * app.
*/ environmentName: string | undefined; @@ -314,7 +325,8 @@ export interface ComponentSummary { appId: string | undefined; /** - *The name of the backend environment that is a part of the Amplify app.
+ *The name of the backend environment that is a part of the Amplify + * app.
*/ environmentName: string | undefined; @@ -514,12 +526,14 @@ export namespace RefreshTokenResponse { export interface DeleteThemeRequest { /** - *The unique ID of the Amplify app associated with the theme to delete.
+ *The unique ID of the Amplify app associated with the theme to + * delete.
*/ appId: string | undefined; /** - *The name of the backend environment that is a part of the Amplify app.
+ *The name of the backend environment that is a part of the Amplify + * app.
*/ environmentName: string | undefined; @@ -548,6 +562,11 @@ export interface ExportThemesRequest { *The name of the backend environment that is part of the Amplify app.
*/ environmentName: string | undefined; + + /** + *The token to request the next page of results.
+ */ + nextToken?: string; } export namespace ExportThemesRequest { @@ -592,7 +611,8 @@ export interface ListThemesRequest { appId: string | undefined; /** - *The name of the backend environment that is a part of the Amplify app.
+ *The name of the backend environment that is a part of the Amplify + * app.
*/ environmentName: string | undefined; @@ -720,8 +740,8 @@ export namespace ThemeValues { } /** - *Stores information for generating Amplify DataStore queries. Use a Predicate
- * to retrieve a subset of the data in a collection.
Stores information for generating Amplify DataStore queries. Use a
+ * Predicate
to retrieve a subset of the data in a collection.
Describes the configuration for all of a component's properties. Use
- * ComponentProperty
to specify the values to render or bind by
- * default.
ComponentProperty
to specify the values to render or bind by default.
*/
export interface ComponentProperty {
/**
@@ -864,7 +883,8 @@ export interface ComponentProperty {
bindingProperties?: ComponentPropertyBindingProperties;
/**
- * The information to bind the component property to data at runtime. Use this for collection components.
+ *The information to bind the component property to data at runtime. Use this for collection + * components.
*/ collectionBindingProperties?: ComponentPropertyBindingProperties; @@ -894,17 +914,19 @@ export interface ComponentProperty { userAttribute?: string; /** - *A list of component properties to concatenate to create the value to assign to this component property.
+ *A list of component properties to concatenate to create the value to assign to this + * component property.
*/ concat?: ComponentProperty[]; /** - *The conditional expression to use to assign a value to the component property..
+ *The conditional expression to use to assign a value to the component property.
*/ condition?: ComponentConditionProperty; /** - *Specifies whether the user configured the property in Amplify Studio after importing it.
+ *Specifies whether the user configured the property in Amplify Studio after + * importing it.
*/ configured?: boolean; @@ -914,9 +936,20 @@ export interface ComponentProperty { type?: string; /** - *The default value assigned to property when the component is imported into an app.
+ *The default value assigned to the property when the component is imported into an + * app.
*/ importedValue?: string; + + /** + *The name of the component that is affected by an event.
+ */ + componentName?: string; + + /** + *The name of the component's property that is affected by an event.
+ */ + property?: string; } export namespace ComponentProperty { @@ -1006,7 +1039,8 @@ export interface Theme { appId: string | undefined; /** - *The name of the backend environment that is a part of the Amplify app.
+ *The name of the backend environment that is a part of the Amplify + * app.
*/ environmentName: string | undefined; @@ -1089,6 +1123,36 @@ export namespace UpdateThemeData { }); } +/** + *Represents the state configuration when an action modifies a property of another + * element within the same component.
+ */ +export interface MutationActionSetStateParameter { + /** + *The name of the component that is being modified.
+ */ + componentName: string | undefined; + + /** + *The name of the component property to apply the state configuration to.
+ */ + property: string | undefined; + + /** + *The state configuration to assign to the property.
+ */ + set: ComponentProperty | undefined; +} + +export namespace MutationActionSetStateParameter { + /** + * @internal + */ + export const filterSensitiveLog = (obj: MutationActionSetStateParameter): any => ({ + ...obj, + }); +} + /** *Represents a conditional expression to set a component property. Use
* ComponentConditionProperty
to set a property to different values conditionally,
@@ -1125,6 +1189,11 @@ export interface ComponentConditionProperty {
*
The value to assign to the property if the condition is not met.
*/ else?: ComponentProperty; + + /** + *The type of the property to evaluate.
+ */ + operandType?: string; } export namespace ComponentConditionProperty { @@ -1143,7 +1212,8 @@ export interface CreateThemeRequest { appId: string | undefined; /** - *The name of the backend environment that is a part of the Amplify app.
+ *The name of the backend environment that is a part of the Amplify + * app.
*/ environmentName: string | undefined; @@ -1256,6 +1326,11 @@ export interface ExportThemesResponse { *Represents the configuration of the exported themes.
*/ entities: Theme[] | undefined; + + /** + *The pagination token that's included if more results are available.
+ */ + nextToken?: string; } export namespace ExportThemesResponse { @@ -1267,6 +1342,96 @@ export namespace ExportThemesResponse { }); } +/** + *Represents the event action configuration for an element of a Component
+ * or ComponentChild
. Use for the workflow feature in Amplify Studio
+ * that allows you to bind events and actions to components. ActionParameters
+ * defines the action that is performed when an event occurs on the component.
The type of navigation action. Valid values are url
and anchor
. This value is required for a navigation action.
The URL to the location to open. Specify this value for a navigation action.
+ */ + url?: ComponentProperty; + + /** + *The HTML anchor link to the location to open. Specify this value for a navigation action.
+ */ + anchor?: ComponentProperty; + + /** + *The element within the same component to modify when the action occurs.
+ */ + target?: ComponentProperty; + + /** + *Specifies whether the user should be signed out globally. Specify this value for an auth sign out action.
+ */ + global?: ComponentProperty; + + /** + *The name of the data model. Use when the action performs an operation on an Amplify DataStore + * model.
+ */ + model?: string; + + /** + *The unique ID of the component that the ActionParameters
apply to.
A dictionary of key-value pairs mapping Amplify Studio properties to fields in a data model. Use when the action + * performs an operation on an Amplify DataStore model.
+ */ + fields?: { [key: string]: ComponentProperty }; + + /** + *A key-value pair that specifies the state property name and its initial value.
+ */ + state?: MutationActionSetStateParameter; +} + +export namespace ActionParameters { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ActionParameters): any => ({ + ...obj, + }); +} + +/** + *Describes the configuration of an event. You can bind an event and a corresponding
+ * action to a Component
or a ComponentChild
. A button click
+ * is an example of an event.
The action to perform when a specific event is raised.
+ */ + action?: string; + + /** + *Describes information about the action.
+ */ + parameters?: ActionParameters; +} + +export namespace ComponentEvent { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ComponentEvent): any => ({ + ...obj, + }); +} + /** *A nested UI configuration within a parent Component
.
Describes the properties of the child component.
+ *Describes the properties of the child component. You can't specify tags
as a
+ * valid property for properties
.
The list of ComponentChild
instances for this component.
Describes the events that can be raised on the child component. Use for the workflow feature in Amplify Studio that allows you to + * bind events and actions to components.
+ */ + events?: { [key: string]: ComponentEvent }; } export namespace ComponentChild { @@ -1302,10 +1474,10 @@ export namespace ComponentChild { } /** - *Contains the configuration settings for a user interface (UI) element for an Amplify app. A
- * component is configured as a primary, stand-alone UI element. Use ComponentChild
- * to configure an instance of a Component
. A ComponentChild
instance
- * inherits the configuration of the main Component
.
Contains the configuration settings for a user interface (UI) element for an Amplify app. A component is configured as a primary, stand-alone UI element. Use
+ * ComponentChild
to configure an instance of a Component
. A
+ * ComponentChild
instance inherits the configuration of the main
+ * Component
.
The name of the backend environment that is a part of the Amplify app.
+ *The name of the backend environment that is a part of the Amplify + * app.
*/ environmentName: string | undefined; @@ -1334,13 +1507,14 @@ export interface Component { name: string | undefined; /** - *The type of the component. This can be an Amplify custom UI component or another custom - * component.
+ *The type of the component. This can be an Amplify custom UI component or + * another custom component.
*/ componentType: string | undefined; /** - *Describes the component's properties.
+ *Describes the component's properties. You can't specify tags
as a valid
+ * property for properties
.
A list of the component's variants. A variant is a unique style configuration of a - * main component.
+ *A list of the component's variants. A variant is a unique style configuration of a main + * component.
*/ variants: ComponentVariant[] | undefined; /** *Describes the component's properties that can be overriden in a customized instance of the - * component.
+ * component. You can't specifytags
as a valid property for
+ * overrides
.
*/
overrides: { [key: string]: { [key: string]: string } } | undefined;
/**
- * The information to connect a component's properties to data at runtime.
+ *The information to connect a component's properties to data at runtime. You can't specify
+ * tags
as a valid property for bindingProperties
.
The data binding configuration for the component's properties. Use this for a collection component.
+ *The data binding configuration for the component's properties. Use this for a collection
+ * component. You can't specify tags
as a valid property for
+ * collectionProperties
.
One or more key-value pairs to use when tagging the component.
*/ tags?: { [key: string]: string }; + + /** + *Describes the events that can be raised on the component. Use for the workflow feature in Amplify Studio that allows you to + * bind events and actions to components.
+ */ + events?: { [key: string]: ComponentEvent }; + + /** + *The schema version of the component when it was imported.
+ */ + schemaVersion?: string; } export namespace Component { @@ -1411,8 +1601,8 @@ export interface CreateComponentData { sourceId?: string; /** - *The component type. This can be an Amplify custom UI component or another custom - * component.
+ *The component type. This can be an Amplify custom UI component or another + * custom component.
*/ componentType: string | undefined; @@ -1443,7 +1633,8 @@ export interface CreateComponentData { bindingProperties: { [key: string]: ComponentBindingPropertiesValue } | undefined; /** - *The data binding configuration for customizing a component's properties. Use this for a collection component.
+ *The data binding configuration for customizing a component's properties. Use this for a + * collection component.
*/ collectionProperties?: { [key: string]: ComponentDataConfiguration }; @@ -1451,6 +1642,16 @@ export interface CreateComponentData { *One or more key-value pairs to use when tagging the component data.
*/ tags?: { [key: string]: string }; + + /** + *The event configuration for the component. Use for the workflow feature in Amplify Studio that allows you to bind events and actions to components.
+ */ + events?: { [key: string]: ComponentEvent }; + + /** + *The schema version of the component when it was imported.
+ */ + schemaVersion?: string; } export namespace CreateComponentData { @@ -1482,8 +1683,8 @@ export interface UpdateComponentData { sourceId?: string; /** - *The type of the component. This can be an Amplify custom UI component or another custom - * component.
+ *The type of the component. This can be an Amplify custom UI component or + * another custom component.
*/ componentType?: string; @@ -1513,9 +1714,20 @@ export interface UpdateComponentData { bindingProperties?: { [key: string]: ComponentBindingPropertiesValue }; /** - *The configuration for binding a component's properties to a data model. Use this for a collection component.
+ *The configuration for binding a component's properties to a data model. Use this for a + * collection component.
*/ collectionProperties?: { [key: string]: ComponentDataConfiguration }; + + /** + *The event configuration for the component. Use for the workflow feature in Amplify Studio that allows you to bind events and actions to components.
+ */ + events?: { [key: string]: ComponentEvent }; + + /** + *The schema version of the component when it was imported.
+ */ + schemaVersion?: string; } export namespace UpdateComponentData { @@ -1534,7 +1746,8 @@ export interface CreateComponentRequest { appId: string | undefined; /** - *The name of the backend environment that is a part of the Amplify app.
+ *The name of the backend environment that is a part of the Amplify + * app.
*/ environmentName: string | undefined; @@ -1647,6 +1860,11 @@ export interface ExportComponentsResponse { *Represents the configuration of the exported components.
*/ entities: Component[] | undefined; + + /** + *The pagination token that's included if more results are available.
+ */ + nextToken?: string; } export namespace ExportComponentsResponse { diff --git a/clients/client-amplifyuibuilder/src/pagination/ExportComponentsPaginator.ts b/clients/client-amplifyuibuilder/src/pagination/ExportComponentsPaginator.ts new file mode 100644 index 000000000000..e7186e0d130b --- /dev/null +++ b/clients/client-amplifyuibuilder/src/pagination/ExportComponentsPaginator.ts @@ -0,0 +1,58 @@ +import { Paginator } from "@aws-sdk/types"; + +import { AmplifyUIBuilder } from "../AmplifyUIBuilder"; +import { AmplifyUIBuilderClient } from "../AmplifyUIBuilderClient"; +import { + ExportComponentsCommand, + ExportComponentsCommandInput, + ExportComponentsCommandOutput, +} from "../commands/ExportComponentsCommand"; +import { AmplifyUIBuilderPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: AmplifyUIBuilderClient, + input: ExportComponentsCommandInput, + ...args: any +): PromiseThe properties that Amazon AppFlow applies when you use Marketo as a flow destination.
+ */ +export interface MarketoDestinationProperties { + /** + *The object specified in the Marketo flow destination.
+ */ + object: string | undefined; + + /** + * The settings that determine how Amazon AppFlow handles an error when placing data in the
+ * destination. For example, this setting would determine if the flow should fail after one
+ * insertion error, or continue and attempt to insert every record regardless of the initial
+ * failure. ErrorHandlingConfig
is a part of the destination connector details.
+ *
The properties that are applied when Amazon Redshift is being used as a destination. *
@@ -4124,6 +4152,11 @@ export interface DestinationConnectorProperties { */ Zendesk?: ZendeskDestinationProperties; + /** + *The properties required to query Marketo.
+ */ + Marketo?: MarketoDestinationProperties; + /** *The properties that are required to query the custom Connector.
*/ diff --git a/clients/client-appflow/src/protocols/Aws_restJson1.ts b/clients/client-appflow/src/protocols/Aws_restJson1.ts index b06faa1914f5..c8c94db02475 100644 --- a/clients/client-appflow/src/protocols/Aws_restJson1.ts +++ b/clients/client-appflow/src/protocols/Aws_restJson1.ts @@ -142,6 +142,7 @@ import { LookoutMetricsDestinationProperties, MarketoConnectorProfileCredentials, MarketoConnectorProfileProperties, + MarketoDestinationProperties, MarketoMetadata, MarketoSourceProperties, OAuth2Credentials, @@ -2752,6 +2753,10 @@ const serializeAws_restJson1DestinationConnectorProperties = ( input.LookoutMetrics !== null && { LookoutMetrics: serializeAws_restJson1LookoutMetricsDestinationProperties(input.LookoutMetrics, context), }), + ...(input.Marketo !== undefined && + input.Marketo !== null && { + Marketo: serializeAws_restJson1MarketoDestinationProperties(input.Marketo, context), + }), ...(input.Redshift !== undefined && input.Redshift !== null && { Redshift: serializeAws_restJson1RedshiftDestinationProperties(input.Redshift, context), @@ -3015,6 +3020,19 @@ const serializeAws_restJson1MarketoConnectorProfileProperties = ( }; }; +const serializeAws_restJson1MarketoDestinationProperties = ( + input: MarketoDestinationProperties, + context: __SerdeContext +): any => { + return { + ...(input.errorHandlingConfig !== undefined && + input.errorHandlingConfig !== null && { + errorHandlingConfig: serializeAws_restJson1ErrorHandlingConfig(input.errorHandlingConfig, context), + }), + ...(input.object !== undefined && input.object !== null && { object: input.object }), + }; +}; + const serializeAws_restJson1MarketoSourceProperties = ( input: MarketoSourceProperties, context: __SerdeContext @@ -4525,6 +4543,10 @@ const deserializeAws_restJson1DestinationConnectorProperties = ( output.LookoutMetrics !== undefined && output.LookoutMetrics !== null ? 
deserializeAws_restJson1LookoutMetricsDestinationProperties(output.LookoutMetrics, context) : undefined, + Marketo: + output.Marketo !== undefined && output.Marketo !== null + ? deserializeAws_restJson1MarketoDestinationProperties(output.Marketo, context) + : undefined, Redshift: output.Redshift !== undefined && output.Redshift !== null ? deserializeAws_restJson1RedshiftDestinationProperties(output.Redshift, context) @@ -4918,6 +4940,19 @@ const deserializeAws_restJson1MarketoConnectorProfileProperties = ( } as any; }; +const deserializeAws_restJson1MarketoDestinationProperties = ( + output: any, + context: __SerdeContext +): MarketoDestinationProperties => { + return { + errorHandlingConfig: + output.errorHandlingConfig !== undefined && output.errorHandlingConfig !== null + ? deserializeAws_restJson1ErrorHandlingConfig(output.errorHandlingConfig, context) + : undefined, + object: __expectString(output.object), + } as any; +}; + const deserializeAws_restJson1MarketoMetadata = (output: any, context: __SerdeContext): MarketoMetadata => { return {} as any; }; diff --git a/clients/client-apprunner/src/endpoints.ts b/clients/client-apprunner/src/endpoints.ts index 9a055bee25aa..a83c5122d048 100644 --- a/clients/client-apprunner/src/endpoints.ts +++ b/clients/client-apprunner/src/endpoints.ts @@ -1,7 +1,32 @@ import { getRegionInfo, PartitionHash, RegionHash } from "@aws-sdk/config-resolver"; import { RegionInfoProvider, RegionInfoProviderOptions } from "@aws-sdk/types"; -const regionHash: RegionHash = {}; +const regionHash: RegionHash = { + "us-east-1": { + variants: [ + { + hostname: "apprunner-fips.us-east-1.amazonaws.com", + tags: ["fips"], + }, + ], + }, + "us-east-2": { + variants: [ + { + hostname: "apprunner-fips.us-east-2.amazonaws.com", + tags: ["fips"], + }, + ], + }, + "us-west-2": { + variants: [ + { + hostname: "apprunner-fips.us-west-2.amazonaws.com", + tags: ["fips"], + }, + ], + }, +}; const partitionHash: PartitionHash = { aws: { @@ -22,6 +47,9 @@ const 
partitionHash: PartitionHash = { "eu-west-1", "eu-west-2", "eu-west-3", + "fips-us-east-1", + "fips-us-east-2", + "fips-us-west-2", "me-south-1", "sa-east-1", "us-east-1", diff --git a/clients/client-athena/src/Athena.ts b/clients/client-athena/src/Athena.ts index 7b9790cdbf16..b15c5e9fa1c6 100644 --- a/clients/client-athena/src/Athena.ts +++ b/clients/client-athena/src/Athena.ts @@ -153,6 +153,11 @@ import { UpdateDataCatalogCommandInput, UpdateDataCatalogCommandOutput, } from "./commands/UpdateDataCatalogCommand"; +import { + UpdateNamedQueryCommand, + UpdateNamedQueryCommandInput, + UpdateNamedQueryCommandOutput, +} from "./commands/UpdateNamedQueryCommand"; import { UpdatePreparedStatementCommand, UpdatePreparedStatementCommandInput, @@ -1264,6 +1269,38 @@ export class Athena extends AthenaClient { } } + /** + *Updates a NamedQuery object. The database or workgroup cannot be updated.
+ */ + public updateNamedQuery( + args: UpdateNamedQueryCommandInput, + options?: __HttpHandlerOptions + ): PromiseUpdates a prepared statement.
*/ diff --git a/clients/client-athena/src/AthenaClient.ts b/clients/client-athena/src/AthenaClient.ts index 09ea8780a054..f36790d67c51 100644 --- a/clients/client-athena/src/AthenaClient.ts +++ b/clients/client-athena/src/AthenaClient.ts @@ -107,6 +107,7 @@ import { StopQueryExecutionCommandInput, StopQueryExecutionCommandOutput } from import { TagResourceCommandInput, TagResourceCommandOutput } from "./commands/TagResourceCommand"; import { UntagResourceCommandInput, UntagResourceCommandOutput } from "./commands/UntagResourceCommand"; import { UpdateDataCatalogCommandInput, UpdateDataCatalogCommandOutput } from "./commands/UpdateDataCatalogCommand"; +import { UpdateNamedQueryCommandInput, UpdateNamedQueryCommandOutput } from "./commands/UpdateNamedQueryCommand"; import { UpdatePreparedStatementCommandInput, UpdatePreparedStatementCommandOutput, @@ -147,6 +148,7 @@ export type ServiceInputTypes = | TagResourceCommandInput | UntagResourceCommandInput | UpdateDataCatalogCommandInput + | UpdateNamedQueryCommandInput | UpdatePreparedStatementCommandInput | UpdateWorkGroupCommandInput; @@ -183,6 +185,7 @@ export type ServiceOutputTypes = | TagResourceCommandOutput | UntagResourceCommandOutput | UpdateDataCatalogCommandOutput + | UpdateNamedQueryCommandOutput | UpdatePreparedStatementCommandOutput | UpdateWorkGroupCommandOutput; diff --git a/clients/client-athena/src/commands/UpdateNamedQueryCommand.ts b/clients/client-athena/src/commands/UpdateNamedQueryCommand.ts new file mode 100644 index 000000000000..15808dbc2a1e --- /dev/null +++ b/clients/client-athena/src/commands/UpdateNamedQueryCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + 
MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { AthenaClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../AthenaClient"; +import { UpdateNamedQueryInput, UpdateNamedQueryOutput } from "../models/models_0"; +import { + deserializeAws_json1_1UpdateNamedQueryCommand, + serializeAws_json1_1UpdateNamedQueryCommand, +} from "../protocols/Aws_json1_1"; + +export interface UpdateNamedQueryCommandInput extends UpdateNamedQueryInput {} +export interface UpdateNamedQueryCommandOutput extends UpdateNamedQueryOutput, __MetadataBearer {} + +/** + *Updates a NamedQuery object. The database or workgroup cannot be updated.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { AthenaClient, UpdateNamedQueryCommand } from "@aws-sdk/client-athena"; // ES Modules import + * // const { AthenaClient, UpdateNamedQueryCommand } = require("@aws-sdk/client-athena"); // CommonJS import + * const client = new AthenaClient(config); + * const command = new UpdateNamedQueryCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link UpdateNamedQueryCommandInput} for command's `input` shape. + * @see {@link UpdateNamedQueryCommandOutput} for command's `response` shape. + * @see {@link AthenaClientResolvedConfig | config} for AthenaClient's `config` shape. + * + */ +export class UpdateNamedQueryCommand extends $Command< + UpdateNamedQueryCommandInput, + UpdateNamedQueryCommandOutput, + AthenaClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: UpdateNamedQueryCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackIndicates that an Amazon S3 canned ACL should be set to control ownership of
+ * stored query results. When Athena stores query results in Amazon S3,
+ * the canned ACL is set with the x-amz-acl
request header. For more
+ * information about S3 Object Ownership, see Object Ownership settings in the Amazon S3 User
+ * Guide.
The Amazon S3 canned ACL that Athena should specify when storing
+ * query results. Currently the only supported canned ACL is
+ * BUCKET_OWNER_FULL_CONTROL
. If a query runs in a workgroup and the
+ * workgroup overrides client-side settings, then the Amazon S3 canned ACL
+ * specified in the workgroup's settings is used for all queries that run in the workgroup.
+ * For more information about Amazon S3 canned ACLs, see Canned ACL in the Amazon S3 User
+ * Guide.
An array of query IDs.
@@ -20,8 +53,8 @@ export namespace BatchGetNamedQueryInput { } /** - *A query, where QueryString
is the list of SQL query statements that
- * comprise the query.
A query, where QueryString
contains the SQL statements that
+ * make up the query.
The SQL query statements that comprise the query.
+ *The SQL statements that make up the query.
*/ QueryString: string | undefined; @@ -317,6 +350,16 @@ export interface ResultConfiguration { * and Workgroup Settings Override Client-Side Settings. */ ExpectedBucketOwner?: string; + + /** + *Indicates that an Amazon S3 canned ACL should be set to control ownership of
+ * stored query results. Currently the only supported canned ACL is
+ * BUCKET_OWNER_FULL_CONTROL
. This is a client-side setting. If workgroup
+ * settings override client-side settings, then the query uses the ACL configuration that
+ * is specified for the workgroup, and also uses the location for storing query results
+ * specified in the workgroup. For more information, see WorkGroupConfiguration$EnforceWorkGroupConfiguration and Workgroup Settings Override Client-Side Settings.
AthenaError
feature provides standardized error information to help you
* understand failed queries and take steps after a query failure occurs.
* AthenaError
includes an ErrorCategory
field that specifies
- * whether the cause of the failed query is due to system error, user error, or unknown
+ * whether the cause of the failed query is due to system error, user error, or other
* error.
*/
export interface AthenaError {
@@ -413,7 +456,7 @@ export interface AthenaError {
* * 2 - User
*- * 3 - Unknown
+ * 3 - Other */ ErrorCategory?: number; @@ -1645,7 +1688,7 @@ export namespace Datum { } /** - *The rows that comprise a query result table.
+ *The rows that make up a query result table.
*/ export interface Row { /** @@ -1664,7 +1707,7 @@ export namespace Row { } /** - *The metadata and rows that comprise a query result set. The metadata describes the + *
The metadata and rows that make up a query result set. The metadata describes the
* column structure and data types. To return a ResultSet
object, use GetQueryResults.
The unique identifier (UUID) of the query.
+ */ + NamedQueryId: string | undefined; + + /** + *The name of the query.
+ */ + Name: string | undefined; + + /** + *The query description.
+ */ + Description?: string; + + /** + *The contents of the query with all query statements.
+ */ + QueryString: string | undefined; +} + +export namespace UpdateNamedQueryInput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateNamedQueryInput): any => ({ + ...obj, + }); +} + +export interface UpdateNamedQueryOutput {} + +export namespace UpdateNamedQueryOutput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateNamedQueryOutput): any => ({ + ...obj, + }); +} + export interface UpdatePreparedStatementInput { /** *The name of the prepared statement.
@@ -2862,6 +2947,22 @@ export interface ResultConfigurationUpdates { * Client-Side Settings. */ RemoveExpectedBucketOwner?: boolean; + + /** + *The ACL configuration for the query results.
+ */ + AclConfiguration?: AclConfiguration; + + /** + *If set to true
, indicates that the previously-specified ACL configuration
+ * for queries in this workgroup should be ignored and set to null. If set to
+ * false
or not set, and a value is present in the
+ * AclConfiguration
of ResultConfigurationUpdates
, the
+ * AclConfiguration
in the workgroup's ResultConfiguration
is
+ * updated with the new value. For more information, see Workgroup Settings Override
+ * Client-Side Settings.
The Amazon Chime SDK meetings APIs in this section allow software developers to create Amazon Chime SDK meetings, set the AWS Regions for meetings, create and manage users, and send and -receive meeting notifications. For more information -about the meeting APIs, see Amazon Chime SDK meetings.
+receive meeting notifications. For more information about the meeting APIs, see Amazon Chime SDK meetings. ## Installing diff --git a/clients/client-chime-sdk-meetings/src/ChimeSDKMeetings.ts b/clients/client-chime-sdk-meetings/src/ChimeSDKMeetings.ts index 2b909fcbd01c..b1392a6bcebd 100644 --- a/clients/client-chime-sdk-meetings/src/ChimeSDKMeetings.ts +++ b/clients/client-chime-sdk-meetings/src/ChimeSDKMeetings.ts @@ -51,8 +51,7 @@ import { /** *The Amazon Chime SDK meetings APIs in this section allow software developers to create Amazon Chime SDK meetings, set the AWS Regions for meetings, create and manage users, and send and - * receive meeting notifications. For more information - * about the meeting APIs, see Amazon Chime SDK meetings.
+ * receive meeting notifications. For more information about the meeting APIs, see Amazon Chime SDK meetings. */ export class ChimeSDKMeetings extends ChimeSDKMeetingsClient { /** diff --git a/clients/client-chime-sdk-meetings/src/ChimeSDKMeetingsClient.ts b/clients/client-chime-sdk-meetings/src/ChimeSDKMeetingsClient.ts index 31341ada678a..0a7e42b3e013 100644 --- a/clients/client-chime-sdk-meetings/src/ChimeSDKMeetingsClient.ts +++ b/clients/client-chime-sdk-meetings/src/ChimeSDKMeetingsClient.ts @@ -257,8 +257,7 @@ export interface ChimeSDKMeetingsClientResolvedConfig extends ChimeSDKMeetingsCl /** *The Amazon Chime SDK meetings APIs in this section allow software developers to create Amazon Chime SDK meetings, set the AWS Regions for meetings, create and manage users, and send and - * receive meeting notifications. For more information - * about the meeting APIs, see Amazon Chime SDK meetings.
+ * receive meeting notifications. For more information about the meeting APIs, see Amazon Chime SDK meetings. */ export class ChimeSDKMeetingsClient extends __Client< __HttpHandlerOptions, diff --git a/clients/client-chime-sdk-meetings/src/models/models_0.ts b/clients/client-chime-sdk-meetings/src/models/models_0.ts index 3b6524d70e07..6e05a30ba9ea 100644 --- a/clients/client-chime-sdk-meetings/src/models/models_0.ts +++ b/clients/client-chime-sdk-meetings/src/models/models_0.ts @@ -52,7 +52,7 @@ export enum MeetingFeatureStatus { } /** - *An optional category of meeting features that contains audio-specific configurations, such as operating parameters for Amazon Voice Focus.
+ *An optional category of meeting features that contains audio-specific configurations, such as operating parameters for Amazon Voice Focus.
*/ export interface AudioFeatures { /** @@ -409,7 +409,7 @@ export class UnprocessableEntityException extends __BaseException { } /** - *The configuration settings of the features available to a meeting.
+ *The configuration settings of the features available to a meeting.
*/ export interface MeetingFeaturesConfiguration { /** @@ -470,43 +470,26 @@ export interface CreateMeetingRequest { * *
* Available values:
- * af-south-1
- * ,
- * ap-northeast-1
- * ,
- * ap-northeast-2
- * ,
- * ap-south-1
- * ,
- * ap-southeast-1
- * ,
- * ap-southeast-2
- * ,
- * ca-central-1
- * ,
- * eu-central-1
- * ,
- * eu-north-1
- * ,
- * eu-south-1
- * ,
- * eu-west-1
- * ,
- * eu-west-2
- * ,
- * eu-west-3
- * ,
- * sa-east-1
- * ,
- * us-east-1
- * ,
- * us-east-2
- * ,
- * us-west-1
- * ,
- * us-west-2
- * .
+ * af-south-1
,
+ * ap-northeast-1
,
+ * ap-northeast-2
,
+ * ap-south-1
,
+ * ap-southeast-1
,
+ * ap-southeast-2
,
+ * ca-central-1
,
+ * eu-central-1
,
+ * eu-north-1
,
+ * eu-south-1
,
+ * eu-west-1
,
+ * eu-west-2
,
+ * eu-west-3
,
+ * sa-east-1
,
+ * us-east-1
,
+ * us-east-2
,
+ * us-west-1
,
+ * us-west-2
.
*
Available values in AWS GovCloud (US) Regions: us-gov-east-1
, us-gov-west-1
.
eu-west-1
, eu-west-2
, eu-west-3
,
* sa-east-1
, us-east-1
, us-east-2
,
* us-west-1
, us-west-2
.
+ * Available values in AWS GovCloud (US) Regions: us-gov-east-1
, us-gov-west-1
.
The Region in which to create the meeting.
+ * + *
+ * Available values:
+ * af-south-1
,
+ * ap-northeast-1
,
+ * ap-northeast-2
,
+ * ap-south-1
,
+ * ap-southeast-1
,
+ * ap-southeast-2
,
+ * ca-central-1
,
+ * eu-central-1
,
+ * eu-north-1
,
+ * eu-south-1
,
+ * eu-west-1
,
+ * eu-west-2
,
+ * eu-west-3
,
+ * sa-east-1
,
+ * us-east-1
,
+ * us-east-2
,
+ * us-west-1
,
+ * us-west-2
.
+ *
Available values in AWS GovCloud (US) Regions: us-gov-east-1
, us-gov-west-1
.
The language code specified for the Amazon Transcribe engine.
*/ - LanguageCode: TranscribeLanguageCode | string | undefined; + LanguageCode?: TranscribeLanguageCode | string; /** *The filtering method passed to Amazon Transcribe.
@@ -1090,11 +1097,12 @@ export interface EngineTranscribeSettings { /** *Lists the PII entity types you want to identify or redact. To specify entity types, you must enable ContentIdentificationType
or ContentRedactionType
.
PIIEntityTypes must be comma-separated. The available values are:
- * BANK_ACCOUNT_NUMBER
, BANK_ROUTING, CREDIT_DEBIT_NUMBER
, CREDIT_DEBIT_CVV
, CREDIT_DEBIT_EXPIRY
, PIN
, EMAIL
,
- * ADDRESS
, NAME
, PHONE
, SSN
, and ALL
.
+ * PIIEntityTypes
must be comma-separated. The available values are:
+ * BANK_ACCOUNT_NUMBER
, BANK_ROUTING, CREDIT_DEBIT_NUMBER
, CREDIT_DEBIT_CVV
, CREDIT_DEBIT_EXPIRY
, PIN
, EMAIL
,
+ * ADDRESS
, NAME
, PHONE
, SSN
, and ALL
.
+ *
* PiiEntityTypes
is an optional parameter with a default value of ALL
.
The name of the language model used during transcription.
*/ LanguageModelName?: string; + + /** + *Automatically identifies the language spoken in media files.
+ */ + IdentifyLanguage?: boolean; + + /** + *Language codes for the languages that you want to identify. You must provide at least 2 codes.
+ */ + LanguageOptions?: string; + + /** + *Language code for the preferred language.
+ */ + PreferredLanguage?: TranscribeLanguageCode | string; } export namespace EngineTranscribeSettings { diff --git a/clients/client-chime-sdk-meetings/src/protocols/Aws_restJson1.ts b/clients/client-chime-sdk-meetings/src/protocols/Aws_restJson1.ts index 3ff68e0081a2..30a51dad838e 100644 --- a/clients/client-chime-sdk-meetings/src/protocols/Aws_restJson1.ts +++ b/clients/client-chime-sdk-meetings/src/protocols/Aws_restJson1.ts @@ -1287,13 +1287,19 @@ const serializeAws_restJson1EngineTranscribeSettings = ( input.EnablePartialResultsStabilization !== null && { EnablePartialResultsStabilization: input.EnablePartialResultsStabilization, }), + ...(input.IdentifyLanguage !== undefined && + input.IdentifyLanguage !== null && { IdentifyLanguage: input.IdentifyLanguage }), ...(input.LanguageCode !== undefined && input.LanguageCode !== null && { LanguageCode: input.LanguageCode }), ...(input.LanguageModelName !== undefined && input.LanguageModelName !== null && { LanguageModelName: input.LanguageModelName }), + ...(input.LanguageOptions !== undefined && + input.LanguageOptions !== null && { LanguageOptions: input.LanguageOptions }), ...(input.PartialResultsStability !== undefined && input.PartialResultsStability !== null && { PartialResultsStability: input.PartialResultsStability }), ...(input.PiiEntityTypes !== undefined && input.PiiEntityTypes !== null && { PiiEntityTypes: input.PiiEntityTypes }), + ...(input.PreferredLanguage !== undefined && + input.PreferredLanguage !== null && { PreferredLanguage: input.PreferredLanguage }), ...(input.Region !== undefined && input.Region !== null && { Region: input.Region }), ...(input.VocabularyFilterMethod !== undefined && input.VocabularyFilterMethod !== null && { VocabularyFilterMethod: input.VocabularyFilterMethod }), diff --git a/clients/client-cloudtrail/src/CloudTrail.ts b/clients/client-cloudtrail/src/CloudTrail.ts index af0212b8f5e1..56b4bf44cf92 100644 --- a/clients/client-cloudtrail/src/CloudTrail.ts +++ 
b/clients/client-cloudtrail/src/CloudTrail.ts @@ -148,7 +148,7 @@ export class CloudTrail extends CloudTrailClient { } /** - *Cancels a query if the query is not in a terminated state, such as CANCELLED
, FAILED
or FINISHED
. You must specify an ARN value for EventDataStore
.
+ *
Cancels a query if the query is not in a terminated state, such as CANCELLED
, FAILED
, TIMED_OUT
, or FINISHED
. You must specify an ARN value for EventDataStore
.
* The ID of the query that you want to cancel is also required. When you run CancelQuery
, the query status might
* show as CANCELLED
even if the operation is not yet finished.
EventDataStore
. Optionally, to shorten the list of results, you can specify a time range,
* formatted as timestamps, by adding StartTime
and EndTime
parameters, and a
* QueryStatus
value. Valid values for QueryStatus
include QUEUED
, RUNNING
,
- * FINISHED
, FAILED
, or CANCELLED
.
+ * FINISHED
, FAILED
, TIMED_OUT
, or CANCELLED
.
*/
public listQueries(args: ListQueriesCommandInput, options?: __HttpHandlerOptions): PromiseCancels a query if the query is not in a terminated state, such as CANCELLED
, FAILED
or FINISHED
. You must specify an ARN value for EventDataStore
.
+ *
Cancels a query if the query is not in a terminated state, such as CANCELLED
, FAILED
, TIMED_OUT
, or FINISHED
. You must specify an ARN value for EventDataStore
.
* The ID of the query that you want to cancel is also required. When you run CancelQuery
, the query status might
* show as CANCELLED
even if the operation is not yet finished.
EventDataStore
. Optionally, to shorten the list of results, you can specify a time range,
* formatted as timestamps, by adding StartTime
and EndTime
parameters, and a
* QueryStatus
value. Valid values for QueryStatus
include QUEUED
, RUNNING
,
- * FINISHED
, FAILED
, or CANCELLED
.
+ * FINISHED
, FAILED
, TIMED_OUT
, or CANCELLED
.
* @example
* Use a bare-bones client and the command you need to make an API call.
* ```javascript
diff --git a/clients/client-cloudtrail/src/endpoints.ts b/clients/client-cloudtrail/src/endpoints.ts
index f736c71ff03a..030185eb0561 100644
--- a/clients/client-cloudtrail/src/endpoints.ts
+++ b/clients/client-cloudtrail/src/endpoints.ts
@@ -22,19 +22,17 @@ const regionHash: RegionHash = {
variants: [
{
hostname: "cloudtrail.us-gov-east-1.amazonaws.com",
- tags: [],
+ tags: ["fips"],
},
],
- signingRegion: "us-gov-east-1",
},
"us-gov-west-1": {
variants: [
{
hostname: "cloudtrail.us-gov-west-1.amazonaws.com",
- tags: [],
+ tags: ["fips"],
},
],
- signingRegion: "us-gov-west-1",
},
"us-west-1": {
variants: [
@@ -155,7 +153,7 @@ const partitionHash: PartitionHash = {
],
},
"aws-us-gov": {
- regions: ["us-gov-east-1", "us-gov-west-1"],
+ regions: ["fips-us-gov-east-1", "fips-us-gov-west-1", "us-gov-east-1", "us-gov-west-1"],
regionRegex: "^us\\-gov\\-\\w+\\-\\d+$",
variants: [
{
diff --git a/clients/client-cloudtrail/src/models/models_0.ts b/clients/client-cloudtrail/src/models/models_0.ts
index e68ed923f28e..3c04bae4d83b 100644
--- a/clients/client-cloudtrail/src/models/models_0.ts
+++ b/clients/client-cloudtrail/src/models/models_0.ts
@@ -751,6 +751,7 @@ export enum QueryStatus {
FINISHED = "FINISHED",
QUEUED = "QUEUED",
RUNNING = "RUNNING",
+ TIMED_OUT = "TIMED_OUT",
}
export interface CancelQueryResponse {
@@ -800,7 +801,7 @@ export class EventDataStoreARNInvalidException extends __BaseException {
}
/**
- * The specified query cannot be canceled because it is in the FINISHED
, FAILED
, or
+ *
The specified query cannot be canceled because it is in the FINISHED
, FAILED
, TIMED_OUT
, or
* CANCELLED
state.
The total bytes that the query scanned in the event data store. This value matches the number of + * bytes for which your account is billed for the query, unless the query is still running.
+ */ + BytesScanned?: number; + /** *The query's run time, in milliseconds.
*/ @@ -2030,7 +2037,7 @@ export interface DescribeQueryResponse { /** *The status of a query. Values for QueryStatus
include QUEUED
, RUNNING
,
- * FINISHED
, FAILED
, or CANCELLED
+ * FINISHED
, FAILED
, TIMED_OUT
, or CANCELLED
*
The total number of results returned by a query.
*/ TotalResultsCount?: number; + + /** + *The total bytes that the query scanned in the event data store. This value matches the number of + * bytes for which your account is billed for the query, unless the query is still running.
+ */ + BytesScanned?: number; } export namespace QueryStatistics { @@ -2785,7 +2798,7 @@ export namespace QueryStatistics { export interface GetQueryResultsResponse { /** *The status of the query. Values include QUEUED
, RUNNING
, FINISHED
, FAILED
,
- * or CANCELLED
.
TIMED_OUT
, or CANCELLED
.
*/
QueryStatus?: QueryStatus | string;
@@ -3368,7 +3381,7 @@ export interface ListQueriesRequest {
/**
* The status of queries that you want to return in results. Valid values for QueryStatus
include QUEUED
, RUNNING
,
- * FINISHED
, FAILED
, or CANCELLED
.
FINISHED
, FAILED
, TIMED_OUT
, or CANCELLED
.
*/
QueryStatus?: QueryStatus | string;
}
@@ -3393,7 +3406,7 @@ export interface Query {
/**
* The status of the query. This can be QUEUED
, RUNNING
, FINISHED
, FAILED
,
- * or CANCELLED
.
TIMED_OUT
, or CANCELLED
.
*/
QueryStatus?: QueryStatus | string;
diff --git a/clients/client-cloudtrail/src/protocols/Aws_json1_1.ts b/clients/client-cloudtrail/src/protocols/Aws_json1_1.ts
index 71f0c34381d4..58c35bf4db48 100644
--- a/clients/client-cloudtrail/src/protocols/Aws_json1_1.ts
+++ b/clients/client-cloudtrail/src/protocols/Aws_json1_1.ts
@@ -4912,6 +4912,7 @@ const deserializeAws_json1_1QueryResultRows = (output: any, context: __SerdeCont
const deserializeAws_json1_1QueryStatistics = (output: any, context: __SerdeContext): QueryStatistics => {
return {
+ BytesScanned: __expectLong(output.BytesScanned),
ResultsCount: __expectInt32(output.ResultsCount),
TotalResultsCount: __expectInt32(output.TotalResultsCount),
} as any;
@@ -4922,6 +4923,7 @@ const deserializeAws_json1_1QueryStatisticsForDescribeQuery = (
context: __SerdeContext
): QueryStatisticsForDescribeQuery => {
return {
+ BytesScanned: __expectLong(output.BytesScanned),
CreationTime:
output.CreationTime !== undefined && output.CreationTime !== null
? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.CreationTime)))
diff --git a/clients/client-cloudwatch-events/src/endpoints.ts b/clients/client-cloudwatch-events/src/endpoints.ts
index ead0160761ed..d420f4722c13 100644
--- a/clients/client-cloudwatch-events/src/endpoints.ts
+++ b/clients/client-cloudwatch-events/src/endpoints.ts
@@ -22,19 +22,17 @@ const regionHash: RegionHash = {
variants: [
{
hostname: "events.us-gov-east-1.amazonaws.com",
- tags: [],
+ tags: ["fips"],
},
],
- signingRegion: "us-gov-east-1",
},
"us-gov-west-1": {
variants: [
{
hostname: "events.us-gov-west-1.amazonaws.com",
- tags: [],
+ tags: ["fips"],
},
],
- signingRegion: "us-gov-west-1",
},
"us-west-1": {
variants: [
@@ -155,7 +153,7 @@ const partitionHash: PartitionHash = {
],
},
"aws-us-gov": {
- regions: ["us-gov-east-1", "us-gov-west-1"],
+ regions: ["fips-us-gov-east-1", "fips-us-gov-west-1", "us-gov-east-1", "us-gov-west-1"],
regionRegex: "^us\\-gov\\-\\w+\\-\\d+$",
variants: [
{
diff --git a/clients/client-cloudwatch-logs/src/endpoints.ts b/clients/client-cloudwatch-logs/src/endpoints.ts
index 9ccc59bbf3cc..b7fd7af15468 100644
--- a/clients/client-cloudwatch-logs/src/endpoints.ts
+++ b/clients/client-cloudwatch-logs/src/endpoints.ts
@@ -22,19 +22,17 @@ const regionHash: RegionHash = {
variants: [
{
hostname: "logs.us-gov-east-1.amazonaws.com",
- tags: [],
+ tags: ["fips"],
},
],
- signingRegion: "us-gov-east-1",
},
"us-gov-west-1": {
variants: [
{
hostname: "logs.us-gov-west-1.amazonaws.com",
- tags: [],
+ tags: ["fips"],
},
],
- signingRegion: "us-gov-west-1",
},
"us-west-1": {
variants: [
@@ -155,7 +153,7 @@ const partitionHash: PartitionHash = {
],
},
"aws-us-gov": {
- regions: ["us-gov-east-1", "us-gov-west-1"],
+ regions: ["fips-us-gov-east-1", "fips-us-gov-west-1", "us-gov-east-1", "us-gov-west-1"],
regionRegex: "^us\\-gov\\-\\w+\\-\\d+$",
variants: [
{
diff --git a/clients/client-comprehend/src/Comprehend.ts b/clients/client-comprehend/src/Comprehend.ts
index caaeb6c85e9d..405fe07670d7 100644
--- a/clients/client-comprehend/src/Comprehend.ts
+++ b/clients/client-comprehend/src/Comprehend.ts
@@ -125,6 +125,11 @@ import {
DescribeSentimentDetectionJobCommandInput,
DescribeSentimentDetectionJobCommandOutput,
} from "./commands/DescribeSentimentDetectionJobCommand";
+import {
+ DescribeTargetedSentimentDetectionJobCommand,
+ DescribeTargetedSentimentDetectionJobCommandInput,
+ DescribeTargetedSentimentDetectionJobCommandOutput,
+} from "./commands/DescribeTargetedSentimentDetectionJobCommand";
import {
DescribeTopicsDetectionJobCommand,
DescribeTopicsDetectionJobCommandInput,
@@ -226,6 +231,11 @@ import {
ListTagsForResourceCommandInput,
ListTagsForResourceCommandOutput,
} from "./commands/ListTagsForResourceCommand";
+import {
+ ListTargetedSentimentDetectionJobsCommand,
+ ListTargetedSentimentDetectionJobsCommandInput,
+ ListTargetedSentimentDetectionJobsCommandOutput,
+} from "./commands/ListTargetedSentimentDetectionJobsCommand";
import {
ListTopicsDetectionJobsCommand,
ListTopicsDetectionJobsCommandInput,
@@ -271,6 +281,11 @@ import {
StartSentimentDetectionJobCommandInput,
StartSentimentDetectionJobCommandOutput,
} from "./commands/StartSentimentDetectionJobCommand";
+import {
+ StartTargetedSentimentDetectionJobCommand,
+ StartTargetedSentimentDetectionJobCommandInput,
+ StartTargetedSentimentDetectionJobCommandOutput,
+} from "./commands/StartTargetedSentimentDetectionJobCommand";
import {
StartTopicsDetectionJobCommand,
StartTopicsDetectionJobCommandInput,
@@ -306,6 +321,11 @@ import {
StopSentimentDetectionJobCommandInput,
StopSentimentDetectionJobCommandOutput,
} from "./commands/StopSentimentDetectionJobCommand";
+import {
+ StopTargetedSentimentDetectionJobCommand,
+ StopTargetedSentimentDetectionJobCommandInput,
+ StopTargetedSentimentDetectionJobCommandOutput,
+} from "./commands/StopTargetedSentimentDetectionJobCommand";
import {
StopTrainingDocumentClassifierCommand,
StopTrainingDocumentClassifierCommandInput,
@@ -1174,6 +1194,41 @@ export class Comprehend extends ComprehendClient {
}
}
+ /**
+ * Gets the properties associated with a targeted sentiment detection job. Use this operation + * to get the status of the job.
+ */ + public describeTargetedSentimentDetectionJob( + args: DescribeTargetedSentimentDetectionJobCommandInput, + options?: __HttpHandlerOptions + ): PromiseGets the properties associated with a topic detection job. Use this operation to get * the status of a detection job.
@@ -1853,6 +1908,38 @@ export class Comprehend extends ComprehendClient { } } + /** + *Gets a list of targeted sentiment detection jobs that you have submitted.
+ */ + public listTargetedSentimentDetectionJobs( + args: ListTargetedSentimentDetectionJobsCommandInput, + options?: __HttpHandlerOptions + ): PromiseGets a list of the topic detection jobs that you have submitted.
*/ @@ -2121,7 +2208,7 @@ export class Comprehend extends ComprehendClient { } /** - *Starts an asynchronous sentiment detection job for a collection of documents. use the + *
Starts an asynchronous sentiment detection job for a collection of documents. Use the * operation to track the status of a * job.
*/ @@ -2154,6 +2241,40 @@ export class Comprehend extends ComprehendClient { } } + /** + *Starts an asynchronous targeted sentiment detection job for a collection of documents. Use the + * operation to track the status of a + * job.
+ */ + public startTargetedSentimentDetectionJob( + args: StartTargetedSentimentDetectionJobCommandInput, + options?: __HttpHandlerOptions + ): PromiseStarts an asynchronous topic detection job. Use the
* DescribeTopicDetectionJob
operation to track the status of a job.
Stops a targeted sentiment detection job in progress.
+ *If the job state is IN_PROGRESS
the job is marked for termination and put
+ * into the STOP_REQUESTED
state. If the job completes before it can be stopped, it
+ * is put into the COMPLETED
state; otherwise the job is be stopped and put into the
+ * STOPPED
state.
If the job is in the COMPLETED
or FAILED
state when you call the
+ * StopDominantLanguageDetectionJob
operation, the operation returns a 400
+ * Internal Request Exception.
When a job is stopped, any documents already processed are written to the output + * location.
+ */ + public stopTargetedSentimentDetectionJob( + args: StopTargetedSentimentDetectionJobCommandInput, + options?: __HttpHandlerOptions + ): PromiseStops a document classifier training job while in progress.
*If the training job state is TRAINING
, the job is marked for termination and
diff --git a/clients/client-comprehend/src/ComprehendClient.ts b/clients/client-comprehend/src/ComprehendClient.ts
index 989fb74a8540..9ebfe1c243a8 100644
--- a/clients/client-comprehend/src/ComprehendClient.ts
+++ b/clients/client-comprehend/src/ComprehendClient.ts
@@ -136,6 +136,10 @@ import {
DescribeSentimentDetectionJobCommandInput,
DescribeSentimentDetectionJobCommandOutput,
} from "./commands/DescribeSentimentDetectionJobCommand";
+import {
+ DescribeTargetedSentimentDetectionJobCommandInput,
+ DescribeTargetedSentimentDetectionJobCommandOutput,
+} from "./commands/DescribeTargetedSentimentDetectionJobCommand";
import {
DescribeTopicsDetectionJobCommandInput,
DescribeTopicsDetectionJobCommandOutput,
@@ -199,6 +203,10 @@ import {
ListTagsForResourceCommandInput,
ListTagsForResourceCommandOutput,
} from "./commands/ListTagsForResourceCommand";
+import {
+ ListTargetedSentimentDetectionJobsCommandInput,
+ ListTargetedSentimentDetectionJobsCommandOutput,
+} from "./commands/ListTargetedSentimentDetectionJobsCommand";
import {
ListTopicsDetectionJobsCommandInput,
ListTopicsDetectionJobsCommandOutput,
@@ -232,6 +240,10 @@ import {
StartSentimentDetectionJobCommandInput,
StartSentimentDetectionJobCommandOutput,
} from "./commands/StartSentimentDetectionJobCommand";
+import {
+ StartTargetedSentimentDetectionJobCommandInput,
+ StartTargetedSentimentDetectionJobCommandOutput,
+} from "./commands/StartTargetedSentimentDetectionJobCommand";
import {
StartTopicsDetectionJobCommandInput,
StartTopicsDetectionJobCommandOutput,
@@ -260,6 +272,10 @@ import {
StopSentimentDetectionJobCommandInput,
StopSentimentDetectionJobCommandOutput,
} from "./commands/StopSentimentDetectionJobCommand";
+import {
+ StopTargetedSentimentDetectionJobCommandInput,
+ StopTargetedSentimentDetectionJobCommandOutput,
+} from "./commands/StopTargetedSentimentDetectionJobCommand";
import {
StopTrainingDocumentClassifierCommandInput,
StopTrainingDocumentClassifierCommandOutput,
@@ -299,6 +315,7 @@ export type ServiceInputTypes =
| DescribePiiEntitiesDetectionJobCommandInput
| DescribeResourcePolicyCommandInput
| DescribeSentimentDetectionJobCommandInput
+ | DescribeTargetedSentimentDetectionJobCommandInput
| DescribeTopicsDetectionJobCommandInput
| DetectDominantLanguageCommandInput
| DetectEntitiesCommandInput
@@ -320,6 +337,7 @@ export type ServiceInputTypes =
| ListPiiEntitiesDetectionJobsCommandInput
| ListSentimentDetectionJobsCommandInput
| ListTagsForResourceCommandInput
+ | ListTargetedSentimentDetectionJobsCommandInput
| ListTopicsDetectionJobsCommandInput
| PutResourcePolicyCommandInput
| StartDocumentClassificationJobCommandInput
@@ -329,6 +347,7 @@ export type ServiceInputTypes =
| StartKeyPhrasesDetectionJobCommandInput
| StartPiiEntitiesDetectionJobCommandInput
| StartSentimentDetectionJobCommandInput
+ | StartTargetedSentimentDetectionJobCommandInput
| StartTopicsDetectionJobCommandInput
| StopDominantLanguageDetectionJobCommandInput
| StopEntitiesDetectionJobCommandInput
@@ -336,6 +355,7 @@ export type ServiceInputTypes =
| StopKeyPhrasesDetectionJobCommandInput
| StopPiiEntitiesDetectionJobCommandInput
| StopSentimentDetectionJobCommandInput
+ | StopTargetedSentimentDetectionJobCommandInput
| StopTrainingDocumentClassifierCommandInput
| StopTrainingEntityRecognizerCommandInput
| TagResourceCommandInput
@@ -368,6 +388,7 @@ export type ServiceOutputTypes =
| DescribePiiEntitiesDetectionJobCommandOutput
| DescribeResourcePolicyCommandOutput
| DescribeSentimentDetectionJobCommandOutput
+ | DescribeTargetedSentimentDetectionJobCommandOutput
| DescribeTopicsDetectionJobCommandOutput
| DetectDominantLanguageCommandOutput
| DetectEntitiesCommandOutput
@@ -389,6 +410,7 @@ export type ServiceOutputTypes =
| ListPiiEntitiesDetectionJobsCommandOutput
| ListSentimentDetectionJobsCommandOutput
| ListTagsForResourceCommandOutput
+ | ListTargetedSentimentDetectionJobsCommandOutput
| ListTopicsDetectionJobsCommandOutput
| PutResourcePolicyCommandOutput
| StartDocumentClassificationJobCommandOutput
@@ -398,6 +420,7 @@ export type ServiceOutputTypes =
| StartKeyPhrasesDetectionJobCommandOutput
| StartPiiEntitiesDetectionJobCommandOutput
| StartSentimentDetectionJobCommandOutput
+ | StartTargetedSentimentDetectionJobCommandOutput
| StartTopicsDetectionJobCommandOutput
| StopDominantLanguageDetectionJobCommandOutput
| StopEntitiesDetectionJobCommandOutput
@@ -405,6 +428,7 @@ export type ServiceOutputTypes =
| StopKeyPhrasesDetectionJobCommandOutput
| StopPiiEntitiesDetectionJobCommandOutput
| StopSentimentDetectionJobCommandOutput
+ | StopTargetedSentimentDetectionJobCommandOutput
| StopTrainingDocumentClassifierCommandOutput
| StopTrainingEntityRecognizerCommandOutput
| TagResourceCommandOutput
diff --git a/clients/client-comprehend/src/commands/DescribeTargetedSentimentDetectionJobCommand.ts b/clients/client-comprehend/src/commands/DescribeTargetedSentimentDetectionJobCommand.ts
new file mode 100644
index 000000000000..5b9256941c2f
--- /dev/null
+++ b/clients/client-comprehend/src/commands/DescribeTargetedSentimentDetectionJobCommand.ts
@@ -0,0 +1,108 @@
+import { getSerdePlugin } from "@aws-sdk/middleware-serde";
+import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http";
+import { Command as $Command } from "@aws-sdk/smithy-client";
+import {
+ FinalizeHandlerArguments,
+ Handler,
+ HandlerExecutionContext,
+ HttpHandlerOptions as __HttpHandlerOptions,
+ MetadataBearer as __MetadataBearer,
+ MiddlewareStack,
+ SerdeContext as __SerdeContext,
+} from "@aws-sdk/types";
+
+import { ComprehendClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../ComprehendClient";
+import {
+ DescribeTargetedSentimentDetectionJobRequest,
+ DescribeTargetedSentimentDetectionJobResponse,
+} from "../models/models_0";
+import {
+ deserializeAws_json1_1DescribeTargetedSentimentDetectionJobCommand,
+ serializeAws_json1_1DescribeTargetedSentimentDetectionJobCommand,
+} from "../protocols/Aws_json1_1";
+
+export interface DescribeTargetedSentimentDetectionJobCommandInput
+ extends DescribeTargetedSentimentDetectionJobRequest {}
+export interface DescribeTargetedSentimentDetectionJobCommandOutput
+ extends DescribeTargetedSentimentDetectionJobResponse,
+ __MetadataBearer {}
+
+/**
+ *
Gets the properties associated with a targeted sentiment detection job. Use this operation + * to get the status of the job.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { ComprehendClient, DescribeTargetedSentimentDetectionJobCommand } from "@aws-sdk/client-comprehend"; // ES Modules import + * // const { ComprehendClient, DescribeTargetedSentimentDetectionJobCommand } = require("@aws-sdk/client-comprehend"); // CommonJS import + * const client = new ComprehendClient(config); + * const command = new DescribeTargetedSentimentDetectionJobCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DescribeTargetedSentimentDetectionJobCommandInput} for command's `input` shape. + * @see {@link DescribeTargetedSentimentDetectionJobCommandOutput} for command's `response` shape. + * @see {@link ComprehendClientResolvedConfig | config} for ComprehendClient's `config` shape. + * + */ +export class DescribeTargetedSentimentDetectionJobCommand extends $Command< + DescribeTargetedSentimentDetectionJobCommandInput, + DescribeTargetedSentimentDetectionJobCommandOutput, + ComprehendClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DescribeTargetedSentimentDetectionJobCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackGets a list of targeted sentiment detection jobs that you have submitted.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { ComprehendClient, ListTargetedSentimentDetectionJobsCommand } from "@aws-sdk/client-comprehend"; // ES Modules import + * // const { ComprehendClient, ListTargetedSentimentDetectionJobsCommand } = require("@aws-sdk/client-comprehend"); // CommonJS import + * const client = new ComprehendClient(config); + * const command = new ListTargetedSentimentDetectionJobsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListTargetedSentimentDetectionJobsCommandInput} for command's `input` shape. + * @see {@link ListTargetedSentimentDetectionJobsCommandOutput} for command's `response` shape. + * @see {@link ComprehendClientResolvedConfig | config} for ComprehendClient's `config` shape. + * + */ +export class ListTargetedSentimentDetectionJobsCommand extends $Command< + ListTargetedSentimentDetectionJobsCommandInput, + ListTargetedSentimentDetectionJobsCommandOutput, + ComprehendClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListTargetedSentimentDetectionJobsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackStarts an asynchronous sentiment detection job for a collection of documents. use the + *
Starts an asynchronous sentiment detection job for a collection of documents. Use the * operation to track the status of a * job.
* @example diff --git a/clients/client-comprehend/src/commands/StartTargetedSentimentDetectionJobCommand.ts b/clients/client-comprehend/src/commands/StartTargetedSentimentDetectionJobCommand.ts new file mode 100644 index 000000000000..f5344a64da29 --- /dev/null +++ b/clients/client-comprehend/src/commands/StartTargetedSentimentDetectionJobCommand.ts @@ -0,0 +1,108 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { ComprehendClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../ComprehendClient"; +import { + StartTargetedSentimentDetectionJobRequest, + StartTargetedSentimentDetectionJobResponse, +} from "../models/models_0"; +import { + deserializeAws_json1_1StartTargetedSentimentDetectionJobCommand, + serializeAws_json1_1StartTargetedSentimentDetectionJobCommand, +} from "../protocols/Aws_json1_1"; + +export interface StartTargetedSentimentDetectionJobCommandInput extends StartTargetedSentimentDetectionJobRequest {} +export interface StartTargetedSentimentDetectionJobCommandOutput + extends StartTargetedSentimentDetectionJobResponse, + __MetadataBearer {} + +/** + *Starts an asynchronous targeted sentiment detection job for a collection of documents. Use the + * operation to track the status of a + * job.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { ComprehendClient, StartTargetedSentimentDetectionJobCommand } from "@aws-sdk/client-comprehend"; // ES Modules import + * // const { ComprehendClient, StartTargetedSentimentDetectionJobCommand } = require("@aws-sdk/client-comprehend"); // CommonJS import + * const client = new ComprehendClient(config); + * const command = new StartTargetedSentimentDetectionJobCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link StartTargetedSentimentDetectionJobCommandInput} for command's `input` shape. + * @see {@link StartTargetedSentimentDetectionJobCommandOutput} for command's `response` shape. + * @see {@link ComprehendClientResolvedConfig | config} for ComprehendClient's `config` shape. + * + */ +export class StartTargetedSentimentDetectionJobCommand extends $Command< + StartTargetedSentimentDetectionJobCommandInput, + StartTargetedSentimentDetectionJobCommandOutput, + ComprehendClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: StartTargetedSentimentDetectionJobCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackStops a targeted sentiment detection job in progress.
+ *If the job state is IN_PROGRESS
the job is marked for termination and put
+ * into the STOP_REQUESTED
state. If the job completes before it can be stopped, it
+ * is put into the COMPLETED
state; otherwise the job is be stopped and put into the
+ * STOPPED
state.
If the job is in the COMPLETED
or FAILED
state when you call the
+ * StopDominantLanguageDetectionJob
operation, the operation returns a 400
+ * Internal Request Exception.
When a job is stopped, any documents already processed are written to the output + * location.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { ComprehendClient, StopTargetedSentimentDetectionJobCommand } from "@aws-sdk/client-comprehend"; // ES Modules import + * // const { ComprehendClient, StopTargetedSentimentDetectionJobCommand } = require("@aws-sdk/client-comprehend"); // CommonJS import + * const client = new ComprehendClient(config); + * const command = new StopTargetedSentimentDetectionJobCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link StopTargetedSentimentDetectionJobCommandInput} for command's `input` shape. + * @see {@link StopTargetedSentimentDetectionJobCommandOutput} for command's `response` shape. + * @see {@link ComprehendClientResolvedConfig | config} for ComprehendClient's `config` shape. + * + */ +export class StopTargetedSentimentDetectionJobCommand extends $Command< + StopTargetedSentimentDetectionJobCommandInput, + StopTargetedSentimentDetectionJobCommandOutput, + ComprehendClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: StopTargetedSentimentDetectionJobCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackProvides configuration parameters for the output of topic detection jobs.
+ *Provides configuration parameters for the output of inference jobs.
* */ export interface OutputDataConfig { @@ -2387,6 +2387,10 @@ export interface OutputDataConfig { * directory specific to the job. TheS3Uri
field contains the location of the
* output file, called output.tar.gz
. It is a compressed archive that contains the
* ouput of the operation.
+ *
+ * For a PII entity detection job, the output file is plain text, not a compressed archive.
+ * The output file name is the same as the input file, with .out
appended at the end.
+ *
A measure of how accurate the recognizer results are for the test data. It is derived from
* the Precision
and Recall
values. The F1Score
is the
- * harmonic average of the two scores. The highest score is 1, and the worst score is 0.
When you use the PiiOutputDataConfig
object with asynchronous operations,
* you specify the Amazon S3 location where you want to write the output data.
+ * For a PII entity detection job, the output file is plain text, not a compressed archive.
+ * The output file name is the same as the input file, with .out
appended at the end.
+ *
The identifier that Amazon Comprehend generated for the job. The operation returns this identifier in its + * response.
+ */ + JobId: string | undefined; +} + +export namespace DescribeTargetedSentimentDetectionJobRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DescribeTargetedSentimentDetectionJobRequest): any => ({ + ...obj, + }); +} + +/** + *Provides information about a targeted sentiment detection job.
+ */ +export interface TargetedSentimentDetectionJobProperties { + /** + *The identifier assigned to the targeted sentiment detection job.
+ */ + JobId?: string; + + /** + *The Amazon Resource Name (ARN) of the targeted sentiment detection job. It is a unique, fully + * qualified identifier for the job. It includes the AWS account, Region, and the job ID. The + * format of the ARN is as follows:
+ *
+ * arn:
+ *
The following is an example job ARN:
+ *
+ * arn:aws:comprehend:us-west-2:111122223333:targeted-sentiment-detection-job/1234abcd12ab34cd56ef1234567890ab
+ *
The name that you assigned to the targeted sentiment detection job.
+ */ + JobName?: string; + + /** + *The current status of the targeted sentiment detection job. If the status is FAILED
,
+ * the Messages
field shows the reason for the failure.
A description of the status of a job.
+ */ + Message?: string; + + /** + *The time that the targeted sentiment detection job was submitted for processing.
+ */ + SubmitTime?: Date; + + /** + *The time that the targeted sentiment detection job ended.
+ */ + EndTime?: Date; + + /** + *The input properties for an inference job.
+ */ + InputDataConfig?: InputDataConfig; + + /** + *Provides configuration parameters for the output of inference jobs.
+ * + */ + OutputDataConfig?: OutputDataConfig; + + /** + *The language code of the input documents.
+ */ + LanguageCode?: LanguageCode | string; + + /** + *The Amazon Resource Name (ARN) that gives Amazon Comprehend read access to your input + * data.
+ */ + DataAccessRoleArn?: string; + + /** + *ID for the AWS Key Management Service (KMS) key that Amazon Comprehend uses to encrypt + * data on the storage volume attached to the ML compute instance(s) that process the + * targeted sentiment detection job. The VolumeKmsKeyId can be either of the following formats:
+ *KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab"
+ *
Amazon Resource Name (ARN) of a KMS Key:
+ * "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"
+ *
Configuration parameters for an optional private Virtual Private Cloud (VPC) containing + * the resources you are using for the job. For more information, see Amazon + * VPC.
+ */ + VpcConfig?: VpcConfig; +} + +export namespace TargetedSentimentDetectionJobProperties { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TargetedSentimentDetectionJobProperties): any => ({ + ...obj, + }); +} + +export interface DescribeTargetedSentimentDetectionJobResponse { + /** + *An object that contains the properties associated with a targeted sentiment detection job.
+ */ + TargetedSentimentDetectionJobProperties?: TargetedSentimentDetectionJobProperties; +} + +export namespace DescribeTargetedSentimentDetectionJobResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DescribeTargetedSentimentDetectionJobResponse): any => ({ + ...obj, + }); +} + export interface DescribeTopicsDetectionJobRequest { /** *The identifier assigned by the user to the detection job.
@@ -5741,6 +5891,94 @@ export namespace ListTagsForResourceResponse { }); } +/** + *Provides information for filtering a list of dominant language detection jobs. For more + * information, see the operation.
+ */ +export interface TargetedSentimentDetectionJobFilter { + /** + *Filters on the name of the job.
+ */ + JobName?: string; + + /** + *Filters the list of jobs based on job status. Returns only jobs with the specified + * status.
+ */ + JobStatus?: JobStatus | string; + + /** + *Filters the list of jobs based on the time that the job was submitted for processing. + * Returns only jobs submitted before the specified time. Jobs are returned in ascending order, + * oldest to newest.
+ */ + SubmitTimeBefore?: Date; + + /** + *Filters the list of jobs based on the time that the job was submitted for processing. + * Returns only jobs submitted after the specified time. Jobs are returned in descending order, + * newest to oldest.
+ */ + SubmitTimeAfter?: Date; +} + +export namespace TargetedSentimentDetectionJobFilter { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TargetedSentimentDetectionJobFilter): any => ({ + ...obj, + }); +} + +export interface ListTargetedSentimentDetectionJobsRequest { + /** + *Filters the jobs that are returned. You can filter jobs on their name, status, or the date + * and time that they were submitted. You can only set one filter at a time.
+ */ + Filter?: TargetedSentimentDetectionJobFilter; + + /** + *Identifies the next page of results to return.
+ */ + NextToken?: string; + + /** + *The maximum number of results to return in each page. The default is 100.
+ */ + MaxResults?: number; +} + +export namespace ListTargetedSentimentDetectionJobsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListTargetedSentimentDetectionJobsRequest): any => ({ + ...obj, + }); +} + +export interface ListTargetedSentimentDetectionJobsResponse { + /** + *A list containing the properties of each job that is returned.
+ */ + TargetedSentimentDetectionJobPropertiesList?: TargetedSentimentDetectionJobProperties[]; + + /** + *Identifies the next page of results to return.
+ */ + NextToken?: string; +} + +export namespace ListTargetedSentimentDetectionJobsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListTargetedSentimentDetectionJobsResponse): any => ({ + ...obj, + }); +} + /** *Provides information for filtering topic detection jobs. For more information, see * .
@@ -6724,6 +6962,132 @@ export namespace StartSentimentDetectionJobResponse { }); } +export interface StartTargetedSentimentDetectionJobRequest { + /** + *The input properties for an inference job.
+ */ + InputDataConfig: InputDataConfig | undefined; + + /** + *Specifies where to send the output files.
+ */ + OutputDataConfig: OutputDataConfig | undefined; + + /** + *The Amazon Resource Name (ARN) of the AWS Identity and Access Management (IAM) role that + * grants Amazon Comprehend read access to your input data. For more information, see Role-based permissions.
+ */ + DataAccessRoleArn: string | undefined; + + /** + *The identifier of the job.
+ */ + JobName?: string; + + /** + *The language of the input documents. You can specify any of the primary languages + * supported by Amazon Comprehend. All documents must be in the same language.
+ */ + LanguageCode: LanguageCode | string | undefined; + + /** + *A unique identifier for the request. If you don't set the client request token, Amazon + * Comprehend generates one.
+ */ + ClientRequestToken?: string; + + /** + *ID for the KMS key that Amazon Comprehend uses to encrypt + * data on the storage volume attached to the ML compute instance(s) that process the analysis + * job. The VolumeKmsKeyId can be either of the following formats:
+ *KMS Key ID: "1234abcd-12ab-34cd-56ef-1234567890ab"
+ *
Amazon Resource Name (ARN) of a KMS Key:
+ * "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"
+ *
Configuration parameters for an optional private Virtual Private Cloud (VPC) containing + * the resources you are using for the job. For more information, see Amazon + * VPC.
+ */ + VpcConfig?: VpcConfig; + + /** + *Tags to be associated with the targeted sentiment detection job. A tag is a key-value pair that + * adds metadata to a resource used by Amazon Comprehend. For example, a tag with "Sales" as the + * key might be added to a resource to indicate its use by the sales department.
+ */ + Tags?: Tag[]; +} + +export namespace StartTargetedSentimentDetectionJobRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: StartTargetedSentimentDetectionJobRequest): any => ({ + ...obj, + }); +} + +export interface StartTargetedSentimentDetectionJobResponse { + /** + *The identifier generated for the job. To get the status of a job, use this identifier with + * the operation.
+ */ + JobId?: string; + + /** + *The Amazon Resource Name (ARN) of the targeted sentiment detection job. It is a unique, fully + * qualified identifier for the job. It includes the AWS account, Region, and the job ID. The + * format of the ARN is as follows:
+ *
+ * arn:
+ *
The following is an example job ARN:
+ *
+ * arn:aws:comprehend:us-west-2:111122223333:targeted-sentiment-detection-job/1234abcd12ab34cd56ef1234567890ab
+ *
The status of the job.
+ *SUBMITTED - The job has been received and is queued for processing.
+ *IN_PROGRESS - Amazon Comprehend is processing the job.
+ *COMPLETED - The job was successfully completed and the output is available.
+ *FAILED - The job did not complete. To get details, use the operation.
+ *Specifies the format and location of the input data for the job.
@@ -7083,6 +7447,45 @@ export namespace StopSentimentDetectionJobResponse { }); } +export interface StopTargetedSentimentDetectionJobRequest { + /** + *The identifier of the targeted sentiment detection job to stop.
+ */ + JobId: string | undefined; +} + +export namespace StopTargetedSentimentDetectionJobRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: StopTargetedSentimentDetectionJobRequest): any => ({ + ...obj, + }); +} + +export interface StopTargetedSentimentDetectionJobResponse { + /** + *The identifier of the targeted sentiment detection job to stop.
+ */ + JobId?: string; + + /** + *Either STOP_REQUESTED
if the job is currently running, or
+ * STOPPED
if the job was previously stopped with the
+ * StopSentimentDetectionJob
operation.
The Amazon Resource Name (ARN) that identifies the document classifier currently being
diff --git a/clients/client-comprehend/src/pagination/ListTargetedSentimentDetectionJobsPaginator.ts b/clients/client-comprehend/src/pagination/ListTargetedSentimentDetectionJobsPaginator.ts
new file mode 100644
index 000000000000..f632f1b85e1e
--- /dev/null
+++ b/clients/client-comprehend/src/pagination/ListTargetedSentimentDetectionJobsPaginator.ts
@@ -0,0 +1,59 @@
+import { Paginator } from "@aws-sdk/types";
+
+import {
+ ListTargetedSentimentDetectionJobsCommand,
+ ListTargetedSentimentDetectionJobsCommandInput,
+ ListTargetedSentimentDetectionJobsCommandOutput,
+} from "../commands/ListTargetedSentimentDetectionJobsCommand";
+import { Comprehend } from "../Comprehend";
+import { ComprehendClient } from "../ComprehendClient";
+import { ComprehendPaginationConfiguration } from "./Interfaces";
+
+/**
+ * @private
+ */
+const makePagedClientRequest = async (
+ client: ComprehendClient,
+ input: ListTargetedSentimentDetectionJobsCommandInput,
+ ...args: any
+): Promise This operation lists details about a DevOps Guru event source that is shared with your
 account. Returns the most recent feedback submitted in the current Amazon Web Services account and Region.
* Returns a list of insights in your organization. You can specify which insights are
* returned by their start time, one or more statuses (ONGOING
,
- * CLOSED
, and CLOSED
), one or more severities
- * (LOW
, MEDIUM
, and HIGH
), and type
- * (REACTIVE
or PROACTIVE
). CLOSED
, and CLOSED
), one or more severities
+ * (LOW
, MEDIUM
, and HIGH
), and type
+ * (REACTIVE
or PROACTIVE
).
Use the Filters
parameter to specify status and severity search
* parameters. Use the Type
parameter to specify REACTIVE
or
- * PROACTIVE
in your search.
PROACTIVE
in your search.
*/
public searchOrganizationInsights(
args: SearchOrganizationInsightsCommandInput,
@@ -951,6 +993,38 @@ export class DevOpsGuru extends DevOpsGuruClient {
}
}
+ /**
+ * Updates the event source configuration.
+ */ + public updateEventSourcesConfig( + args: UpdateEventSourcesConfigCommandInput, + options?: __HttpHandlerOptions + ): PromiseUpdates the collection of resources that DevOps Guru analyzes. * The two types of Amazon Web Services resource collections supported are Amazon Web Services CloudFormation stacks and diff --git a/clients/client-devops-guru/src/DevOpsGuruClient.ts b/clients/client-devops-guru/src/DevOpsGuruClient.ts index 0984359cfc81..8e2f5350c256 100644 --- a/clients/client-devops-guru/src/DevOpsGuruClient.ts +++ b/clients/client-devops-guru/src/DevOpsGuruClient.ts @@ -64,6 +64,10 @@ import { DescribeAccountOverviewCommandOutput, } from "./commands/DescribeAccountOverviewCommand"; import { DescribeAnomalyCommandInput, DescribeAnomalyCommandOutput } from "./commands/DescribeAnomalyCommand"; +import { + DescribeEventSourcesConfigCommandInput, + DescribeEventSourcesConfigCommandOutput, +} from "./commands/DescribeEventSourcesConfigCommand"; import { DescribeFeedbackCommandInput, DescribeFeedbackCommandOutput } from "./commands/DescribeFeedbackCommand"; import { DescribeInsightCommandInput, DescribeInsightCommandOutput } from "./commands/DescribeInsightCommand"; import { @@ -123,6 +127,10 @@ import { StartCostEstimationCommandInput, StartCostEstimationCommandOutput, } from "./commands/StartCostEstimationCommand"; +import { + UpdateEventSourcesConfigCommandInput, + UpdateEventSourcesConfigCommandOutput, +} from "./commands/UpdateEventSourcesConfigCommand"; import { UpdateResourceCollectionCommandInput, UpdateResourceCollectionCommandOutput, @@ -138,6 +146,7 @@ export type ServiceInputTypes = | DescribeAccountHealthCommandInput | DescribeAccountOverviewCommandInput | DescribeAnomalyCommandInput + | DescribeEventSourcesConfigCommandInput | DescribeFeedbackCommandInput | DescribeInsightCommandInput | DescribeOrganizationHealthCommandInput @@ -158,6 +167,7 @@ export type ServiceInputTypes = | SearchInsightsCommandInput | SearchOrganizationInsightsCommandInput | 
StartCostEstimationCommandInput + | UpdateEventSourcesConfigCommandInput | UpdateResourceCollectionCommandInput | UpdateServiceIntegrationCommandInput; @@ -166,6 +176,7 @@ export type ServiceOutputTypes = | DescribeAccountHealthCommandOutput | DescribeAccountOverviewCommandOutput | DescribeAnomalyCommandOutput + | DescribeEventSourcesConfigCommandOutput | DescribeFeedbackCommandOutput | DescribeInsightCommandOutput | DescribeOrganizationHealthCommandOutput @@ -186,6 +197,7 @@ export type ServiceOutputTypes = | SearchInsightsCommandOutput | SearchOrganizationInsightsCommandOutput | StartCostEstimationCommandOutput + | UpdateEventSourcesConfigCommandOutput | UpdateResourceCollectionCommandOutput | UpdateServiceIntegrationCommandOutput; diff --git a/clients/client-devops-guru/src/commands/DescribeEventSourcesConfigCommand.ts b/clients/client-devops-guru/src/commands/DescribeEventSourcesConfigCommand.ts new file mode 100644 index 000000000000..93728a76c465 --- /dev/null +++ b/clients/client-devops-guru/src/commands/DescribeEventSourcesConfigCommand.ts @@ -0,0 +1,98 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { DevOpsGuruClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../DevOpsGuruClient"; +import { DescribeEventSourcesConfigRequest, DescribeEventSourcesConfigResponse } from "../models/models_0"; +import { + deserializeAws_restJson1DescribeEventSourcesConfigCommand, + serializeAws_restJson1DescribeEventSourcesConfigCommand, +} from "../protocols/Aws_restJson1"; + +export interface DescribeEventSourcesConfigCommandInput 
extends DescribeEventSourcesConfigRequest {} +export interface DescribeEventSourcesConfigCommandOutput extends DescribeEventSourcesConfigResponse, __MetadataBearer {} + +/** + *
This operation lists details about a DevOps Guru event source that is shared with your
 account.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { DevOpsGuruClient, DescribeEventSourcesConfigCommand } from "@aws-sdk/client-devops-guru"; // ES Modules import + * // const { DevOpsGuruClient, DescribeEventSourcesConfigCommand } = require("@aws-sdk/client-devops-guru"); // CommonJS import + * const client = new DevOpsGuruClient(config); + * const command = new DescribeEventSourcesConfigCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DescribeEventSourcesConfigCommandInput} for command's `input` shape. + * @see {@link DescribeEventSourcesConfigCommandOutput} for command's `response` shape. + * @see {@link DevOpsGuruClientResolvedConfig | config} for DevOpsGuruClient's `config` shape. + * + */ +export class DescribeEventSourcesConfigCommand extends $Command< + DescribeEventSourcesConfigCommandInput, + DescribeEventSourcesConfigCommandOutput, + DevOpsGuruClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DescribeEventSourcesConfigCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack Returns a list of insights in your organization. You can specify which insights are
* returned by their start time, one or more statuses (ONGOING
,
- * CLOSED
, and CLOSED
), one or more severities
- * (LOW
, MEDIUM
, and HIGH
), and type
- * (REACTIVE
or PROACTIVE
).
CLOSED
, and CLOSED
), one or more severities
+ * (LOW
, MEDIUM
, and HIGH
), and type
+ * (REACTIVE
or PROACTIVE
).
* Use the Filters
parameter to specify status and severity search
* parameters. Use the Type
parameter to specify REACTIVE
or
- * PROACTIVE
in your search.
PROACTIVE
in your search.
* @example
* Use a bare-bones client and the command you need to make an API call.
* ```javascript
diff --git a/clients/client-devops-guru/src/commands/UpdateEventSourcesConfigCommand.ts b/clients/client-devops-guru/src/commands/UpdateEventSourcesConfigCommand.ts
new file mode 100644
index 000000000000..4db12b10b963
--- /dev/null
+++ b/clients/client-devops-guru/src/commands/UpdateEventSourcesConfigCommand.ts
@@ -0,0 +1,95 @@
+import { getSerdePlugin } from "@aws-sdk/middleware-serde";
+import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http";
+import { Command as $Command } from "@aws-sdk/smithy-client";
+import {
+ FinalizeHandlerArguments,
+ Handler,
+ HandlerExecutionContext,
+ HttpHandlerOptions as __HttpHandlerOptions,
+ MetadataBearer as __MetadataBearer,
+ MiddlewareStack,
+ SerdeContext as __SerdeContext,
+} from "@aws-sdk/types";
+
+import { DevOpsGuruClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../DevOpsGuruClient";
+import { UpdateEventSourcesConfigRequest, UpdateEventSourcesConfigResponse } from "../models/models_0";
+import {
+ deserializeAws_restJson1UpdateEventSourcesConfigCommand,
+ serializeAws_restJson1UpdateEventSourcesConfigCommand,
+} from "../protocols/Aws_restJson1";
+
+export interface UpdateEventSourcesConfigCommandInput extends UpdateEventSourcesConfigRequest {}
+export interface UpdateEventSourcesConfigCommandOutput extends UpdateEventSourcesConfigResponse, __MetadataBearer {}
+
+/**
+ * Updates the event source configuration.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { DevOpsGuruClient, UpdateEventSourcesConfigCommand } from "@aws-sdk/client-devops-guru"; // ES Modules import + * // const { DevOpsGuruClient, UpdateEventSourcesConfigCommand } = require("@aws-sdk/client-devops-guru"); // CommonJS import + * const client = new DevOpsGuruClient(config); + * const command = new UpdateEventSourcesConfigCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link UpdateEventSourcesConfigCommandInput} for command's `input` shape. + * @see {@link UpdateEventSourcesConfigCommandOutput} for command's `response` shape. + * @see {@link DevOpsGuruClientResolvedConfig | config} for DevOpsGuruClient's `config` shape. + * + */ +export class UpdateEventSourcesConfigCommand extends $Command< + UpdateEventSourcesConfigCommandInput, + UpdateEventSourcesConfigCommandOutput, + DevOpsGuruClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: UpdateEventSourcesConfigCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackInformation about your account's integration with Amazon CodeGuru Profiler.
+ */ +export interface AmazonCodeGuruProfilerIntegration { + /** + *The status of the CodeGuru Profiler integration.
+ */ + Status?: EventSourceOptInStatus | string; +} + +export namespace AmazonCodeGuruProfilerIntegration { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AmazonCodeGuruProfilerIntegration): any => ({ + ...obj, + }); +} + /** * A time range that specifies when DevOps Guru opens and then closes an anomaly. This
* is different from AnomalyTimeRange
, which specifies the time range when
@@ -420,10 +444,10 @@ export namespace AnomalyReportedTimeRange {
}
/**
- *
The Amazon Web Services resources in which DevOps Guru detected unusual behavior that resulted in - * the generation of an anomaly. When DevOps Guru detects multiple related anomalies, it creates - * and insight with details about the anomalous behavior and suggestions about how to correct the - * problem.
+ *The Amazon Web Services resources in which DevOps Guru detected unusual behavior that resulted in the + * generation of an anomaly. When DevOps Guru detects multiple related anomalies, it creates and + * insight with details about the anomalous behavior and suggestions about how to correct + * the problem.
*/ export interface AnomalyResource { /** @@ -520,8 +544,8 @@ export interface CloudWatchMetricsDataSummary { TimestampMetricValuePairList?: TimestampMetricValuePair[]; /** - *This is an enum of the status showing whether the metric value pair list has partial or - * complete data, or if there was an error.
+ *This is an enum of the status showing whether the metric value pair list has partial + * or complete data, or if there was an error.
*/ StatusCode?: CloudWatchMetricDataStatusCode | string; } @@ -600,15 +624,14 @@ export namespace CloudWatchMetricsDetail { /** *A logical grouping of Performance Insights metrics for a related subject area. For example, the
- * db.sql
dimension group consists of the following dimensions:
- * db.sql.id
, db.sql.db_id
, db.sql.statement
, and
- * db.sql.tokenized_id
.
Each response element returns a maximum of 500 bytes. For larger elements, such as SQL statements, - * only the first 500 bytes are returned.
- *Amazon RDS Performance Insights enables you to monitor and explore different
+ * db.sql
dimension group consists of the following dimensions:
+ * db.sql.id
, db.sql.db_id
, db.sql.statement
,
+ * and db.sql.tokenized_id
.
Each response element returns a maximum of 500 bytes. For larger elements, such as + * SQL statements, only the first 500 bytes are returned.
+ *Amazon RDS Performance Insights enables you to monitor and explore different * dimensions of database load based on data captured from a running DB instance. * DB load is measured as average active sessions. Performance Insights provides the * data to API consumers as a two-dimensional time-series dataset. The time dimension @@ -631,128 +654,144 @@ export interface PerformanceInsightsMetricDimensionGroup { /** *
The name of the dimension group. Its valid values are:
* - *
- * db
- The name of the database to which the client is connected (only Aurora PostgreSQL, Amazon RDS PostgreSQL,
- * Aurora MySQL, Amazon RDS MySQL, and MariaDB)
+ * db
- The name of the database to which the client is connected
+ * (only Aurora PostgreSQL, Amazon RDS PostgreSQL, Aurora MySQL, Amazon RDS MySQL, and MariaDB)
- * db.application
- The name of the application that is connected to the database (only Aurora
- * PostgreSQL and RDS PostgreSQL)
+ * db.application
- The name of the application that is connected to
+ * the database (only Aurora PostgreSQL and RDS PostgreSQL)
- * db.host
- The host name of the connected client (all engines)
+ * db.host
- The host name of the connected client (all
+ * engines)
- * db.session_type
- The type of the current session (only Aurora PostgreSQL and RDS PostgreSQL)
+ * db.session_type
- The type of the current session (only Aurora PostgreSQL
+ * and RDS PostgreSQL)
- * db.sql
- The SQL that is currently executing (all engines)
+ * db.sql
- The SQL that is currently executing (all engines)
- * db.sql_tokenized
- The SQL digest (all engines)
+ * db.sql_tokenized
- The SQL digest (all engines)
- * db.wait_event
- The event for which the database backend is waiting (all engines)
+ * db.wait_event
- The event for which the database backend is waiting
+ * (all engines)
- * db.wait_event_type
- The type of event for which the database backend is waiting (all engines)
+ * db.wait_event_type
- The type of event for which the database
+ * backend is waiting (all engines)
- * db.user
- The user logged in to the database (all engines)
+ * db.user
- The user logged in to the database (all engines)
A list of specific dimensions from a dimension group. If this parameter is not present, - * then it signifies that all of the dimensions in the group were requested or are present in - * the response.
- *Valid values for elements in the Dimensions
array are:
A list of specific dimensions from a dimension group. If this parameter is not + * present, then it signifies that all of the dimensions in the group were requested or are + * present in the response.
+ *Valid values for elements in the Dimensions
array are:
- * db.application.name
- The name of the application that is connected to the database (only
- * Aurora PostgreSQL and RDS PostgreSQL)
+ * db.application.name
- The name of the application that is connected
+ * to the database (only Aurora PostgreSQL and RDS PostgreSQL)
- * db.host.id
- The host ID of the connected client (all engines)
+ * db.host.id
- The host ID of the connected client (all
+ * engines)
- * db.host.name
- The host name of the connected client (all engines)
+ * db.host.name
- The host name of the connected client (all
+ * engines)
- * db.name
- The name of the database to which the client is connected (only Aurora PostgreSQL, Amazon RDS
- * PostgreSQL, Aurora MySQL, Amazon RDS MySQL, and MariaDB)
+ * db.name
- The name of the database to which the client is connected
+ * (only Aurora PostgreSQL, Amazon RDS PostgreSQL, Aurora MySQL, Amazon RDS MySQL, and MariaDB)
- * db.session_type.name
- The type of the current session (only Aurora PostgreSQL and RDS PostgreSQL)
+ * db.session_type.name
- The type of the current session (only Aurora
+ * PostgreSQL and RDS PostgreSQL)
- * db.sql.id
- The SQL ID generated by Performance Insights (all engines)
+ * db.sql.id
- The SQL ID generated by Performance Insights (all engines)
- * db.sql.db_id
- The SQL ID generated by the database (all engines)
+ * db.sql.db_id
- The SQL ID generated by the database (all
+ * engines)
- * db.sql.statement
- The SQL text that is being executed (all engines)
+ * db.sql.statement
- The SQL text that is being executed (all
+ * engines)
- * db.sql.tokenized_id
- *
+ * db.sql.tokenized_id
+ *
- * db.sql_tokenized.id
- The SQL digest ID generated by Performance Insights (all engines)
+ * db.sql_tokenized.id
- The SQL digest ID generated by Performance Insights (all
+ * engines)
- * db.sql_tokenized.db_id
- SQL digest ID generated by the database (all engines)
+ * db.sql_tokenized.db_id
- SQL digest ID generated by the database
+ * (all engines)
- * db.sql_tokenized.statement
- The SQL digest text (all engines)
+ * db.sql_tokenized.statement
- The SQL digest text (all
+ * engines)
- * db.user.id
- The ID of the user logged in to the database (all engines)
+ * db.user.id
- The ID of the user logged in to the database (all
+ * engines)
- * db.user.name
- The name of the user logged in to the database (all engines)
+ * db.user.name
- The name of the user logged in to the database (all
+ * engines)
- * db.wait_event.name
- The event for which the backend is waiting (all engines)
+ * db.wait_event.name
- The event for which the backend is waiting
+ * (all engines)
- * db.wait_event.type
- The type of event for which the backend is waiting (all engines)
+ * db.wait_event.type
- The type of event for which the backend is
+ * waiting (all engines)
- * db.wait_event_type.name
- The name of the event type for which the backend is waiting (all
- * engines)
+ * db.wait_event_type.name
- The name of the event type for which the
+ * backend is waiting (all engines)
A single query to be processed. Use these parameters to
- * query the Performance Insights A single query to be processed. Use these parameters to query the Performance Insights
+ * Amazon RDS Performance Insights enables you to monitor and explore different
+ * GetResourceMetrics
API to retrieve the metrics
- * for an anomaly. For more information, see
+ *
in the Amazon RDS Performance Insights API
+ * Reference.GetResourceMetrics
API to retrieve the metrics for an anomaly. For more
+ * information, see
* GetResourceMetrics
- *
- * in the Amazon RDS Performance Insights API Reference.
Amazon RDS Performance Insights enables you to monitor and explore different * dimensions of database load based on data captured from a running DB instance. * DB load is measured as average active sessions. Performance Insights provides the * data to API consumers as a two-dimensional time-series dataset. The time dimension @@ -801,47 +839,53 @@ export namespace PerformanceInsightsMetricDimensionGroup { */ export interface PerformanceInsightsMetricQuery { /** - *
The name of the meteric used used when querying an Performance Insights GetResourceMetrics
API for
- * anomaly metrics.
The name of the meteric used used when querying an Performance Insights
+ * GetResourceMetrics
API for anomaly metrics.
Valid values for Metric
are:
Valid values for Metric
are:
- * db.load.avg
- a scaled representation of the number of active sessions
- * for the database engine.
+ * db.load.avg
- a scaled representation of the number of active sessions for the
+ * database engine.
- * db.sampledload.avg
- the raw number of active sessions for the
- * database engine.
+ * db.sampledload.avg
- the raw number of active sessions for the database
+ * engine.
If the number of active sessions is less than an internal Performance Insights threshold, db.load.avg
and db.sampledload.avg
- * are the same value. If the number of active sessions is greater than the internal threshold, Performance Insights samples the active sessions, with db.load.avg
- * showing the scaled values, db.sampledload.avg
showing the raw values, and db.sampledload.avg
less than db.load.avg
.
- * For most use cases, you can query db.load.avg
only.
If the number of active sessions is less than an internal Performance Insights threshold,
+ * db.load.avg
and db.sampledload.avg
are the same value. If
+ * the number of active sessions is greater than the internal threshold, Performance Insights samples the active sessions, with
+ * db.load.avg
showing the scaled values, db.sampledload.avg
+ * showing the raw values, and db.sampledload.avg
less than
+ * db.load.avg
. For most use cases, you can query db.load.avg
+ * only.
The specification for how to aggregate the data points from a Performance Insights GetResourceMetrics
API query. The
- * Performance Insights query returns all of the dimensions within that group,
- * unless you provide the names of specific dimensions within that group. You can also request
- * that Performance Insights return a limited number of values for a dimension.
The specification for how to aggregate the data points from a Performance Insights
+ * GetResourceMetrics
API query. The Performance Insights query returns all of the
+ * dimensions within that group, unless you provide the names of specific dimensions within
+ * that group. You can also request that Performance Insights return a limited number of values for a
+ * dimension.
One or more filters to apply to a Performance Insights GetResourceMetrics
API query. Restrictions:
One or more filters to apply to a Performance Insights GetResourceMetrics
API query.
+ * Restrictions:
Any number of filters by the same dimension, as specified in the GroupBy
parameter.
Any number of filters by the same dimension, as specified in the
+ * GroupBy
parameter.
A single filter for any other dimension in this dimension group.
- *A single filter for any other dimension in this dimension group.
+ * *A reference value to compare Performance Insights metrics against to determine if the metrics - * demonstrate anomalous behavior.
+ * demonstrate anomalous behavior. */ export interface PerformanceInsightsReferenceScalar { /** @@ -896,19 +940,22 @@ export namespace PerformanceInsightsReferenceScalar { } /** - *Reference scalar values and other metrics that DevOps Guru displays on a graph in its console along with the actual metrics it - * analyzed. Compare these reference values to your actual metrics to help you understand anomalous behavior that DevOps Guru detected.
+ *Reference scalar values and other metrics that DevOps Guru displays on a graph in its + * console along with the actual metrics it analyzed. Compare these reference values to + * your actual metrics to help you understand anomalous behavior that DevOps Guru + * detected.
*/ export interface PerformanceInsightsReferenceComparisonValues { /** - *A scalar value DevOps Guru for a metric that DevOps Guru compares to actual metric values. This reference value is used - * to determine if an actual metric value should be considered anomalous.
+ *A scalar value DevOps Guru for a metric that DevOps Guru compares to actual metric values. This + * reference value is used to determine if an actual metric value should be considered + * anomalous.
*/ ReferenceScalar?: PerformanceInsightsReferenceScalar; /** - *A metric that DevOps Guru compares to actual metric values. This reference metric is used - * to determine if an actual metric should be considered anomalous.
+ *A metric that DevOps Guru compares to actual metric values. This reference metric is used to + * determine if an actual metric should be considered anomalous.
*/ ReferenceMetric?: PerformanceInsightsReferenceMetric; } @@ -923,8 +970,8 @@ export namespace PerformanceInsightsReferenceComparisonValues { } /** - *Reference data used to evaluate Performance Insights to determine if its performance - * is anomalous or not.
+ *Reference data used to evaluate Performance Insights to determine if its performance is anomalous or + * not.
*/ export interface PerformanceInsightsReferenceData { /** @@ -934,10 +981,9 @@ export interface PerformanceInsightsReferenceData { /** *The specific reference values used to evaluate the Performance Insights. For more information, see
- *
+ *
* PerformanceInsightsReferenceComparisonValues
- *
.
- *
Details about Performance Insights metrics.
- * - *Amazon RDS Performance Insights enables you to monitor and explore different + *
Amazon RDS Performance Insights enables you to monitor and explore different * dimensions of database load based on data captured from a running DB instance. * DB load is measured as average active sessions. Performance Insights provides the * data to API consumers as a two-dimensional time-series dataset. The time dimension @@ -1009,20 +1054,16 @@ export interface PerformanceInsightsMetricsDetail { Unit?: string; /** - *
A single query to be processed for the metric. For more information, see
- * A single query to be processed for the metric. For more information, see
- * For more information, see
- * For more information, see An array of An array of Metadata about an anomaly. The anomaly is detected using analysis of the metric data
 over a period of time The source of the anomaly. The name of the anomaly's resource. The anomaly's resource type. A collection of Amazon Web Services stags. Tags help you identify and organize your Amazon Web Services resources. Many Amazon Web Services services support
+ * Tags help you identify and organize your Amazon Web Services resources. Many Amazon Web Services services support
* tagging, so you can assign the same tag to resources from different services to indicate
* that the resources are related. For example, you can assign the same tag to an Amazon DynamoDB
* table resource that you assign to an Lambda function. For more information about
@@ -1337,7 +1407,7 @@ export interface TagCollection {
/**
* The values in an Amazon Web Services tag collection. The tag's value is an optional field used to associate a string with
+ * The tag's value is an optional field used to associate a string with
* the tag key (for example, The Amazon Web Services tags that are used by resources in the resource collection. Tags help you identify and organize your Amazon Web Services resources. Many Amazon Web Services services support
+ * Tags help you identify and organize your Amazon Web Services resources. Many Amazon Web Services services support
* tagging, so you can assign the same tag to resources from different services to indicate
* that the resources are related. For example, you can assign the same tag to an Amazon DynamoDB
* table resource that you assign to an Lambda function. For more information about
@@ -1490,6 +1560,16 @@ export interface ProactiveAnomaly {
* threshold is related to the anomalous behavior that generated this anomaly. The metadata for the anomaly. Information about a resource in which DevOps Guru detected anomalous behavior. Describes the event sources. The name of the event source. The ID of the insight for which the feedback was provided.
+ *
* PerformanceInsightsMetricQuery
*
.
+ *
.
* PerformanceInsightsReferenceData
- *
.
- * PerformanceInsightsMetricsDetail
objects that contain information
- * about analyzed Performance Insights metrics that show anomalous behavior.PerformanceInsightsMetricsDetail
objects that contain
+ * information about analyzed Performance Insights metrics that show anomalous behavior.111122223333
, Production
, or a team
* name). The key and value are the tag's key pair.
* Omitting the tag value is the same as using an empty
@@ -1371,7 +1441,7 @@ export interface ResourceCollection {
/**
*
Describes the proactive insight.
+ */ + Description?: string; } export namespace ProactiveInsight { @@ -1855,6 +1986,11 @@ export interface ReactiveInsight { * the creation of OpstItems insights before they are created for each insight. */ SsmOpsItemId?: string; + + /** + *Describes the reactive insight.
+ */ + Description?: string; } export namespace ReactiveInsight { @@ -2187,7 +2323,7 @@ export namespace ServiceHealth { /** *Information about the health of Amazon Web Services resources in your account that are specified by - * an Amazon Web Services tag key.
+ * an Amazon Web Services tag key. */ export interface TagHealth { /** @@ -2210,7 +2346,7 @@ export interface TagHealth { /** *The value in an Amazon Web Services tag.
- *The tag's value is an optional field used to associate a string with + *
The tag's value is an optional field used to associate a string with
* the tag key (for example, 111122223333
, Production
, or a team
* name). The key and value are the tag's key pair.
* Omitting the tag value is the same as using an empty
@@ -2220,9 +2356,9 @@ export interface TagHealth {
TagValue?: string;
/**
- *
Information about the health of the Amazon Web Services resources in your account that are - * specified by an Amazon Web Services tag, including the number of open proactive, open reactive - * insights, and the Mean Time to Recover (MTTR) of closed insights.
+ *Information about the health of the Amazon Web Services resources in your account that are specified + * by an Amazon Web Services tag, including the number of open proactive, open reactive insights, and the + * Mean Time to Recover (MTTR) of closed insights.
*/ Insight?: InsightHealth; } @@ -2239,7 +2375,7 @@ export namespace TagHealth { export interface DescribeOrganizationResourceCollectionHealthResponse { /** *The returned CloudFormationHealthOverview
object that contains an
- * InsightHealthOverview
object with the requested system health
+ * InsightHealthOverview
object with the requested system health
* information.
The Amazon Web Services tags that are used by resources in the resource collection.
- *Tags help you identify and organize your Amazon Web Services resources. Many Amazon Web Services services support + *
Tags help you identify and organize your Amazon Web Services resources. Many Amazon Web Services services support * tagging, so you can assign the same tag to resources from different services to indicate * that the resources are related. For example, you can assign the same tag to an Amazon DynamoDB * table resource that you assign to an Lambda function. For more information about @@ -2580,12 +2716,12 @@ export namespace CloudFormationCostEstimationResourceCollectionFilter { } /** - *
Information about a collection of Amazon Web Services resources that are identified by an - * Amazon Web Services tag. This collection of resources is used to create a monthly cost estimate - * for DevOps Guru to analyze Amazon Web Services resources. The maximum number of tags you can specify for a - * cost estimate is one. The estimate created is for the cost to analyze the Amazon Web Services - * resources defined by the tag. For more information, see Stacks in the - * Amazon Web Services CloudFormation User Guide.
+ *Information about a collection of Amazon Web Services resources that are identified by an Amazon Web Services tag. + * This collection of resources is used to create a monthly cost estimate for DevOps Guru to + * analyze Amazon Web Services resources. The maximum number of tags you can specify for a cost estimate + * is one. The estimate created is for the cost to analyze the Amazon Web Services resources defined by + * the tag. For more information, see Stacks in the + * Amazon Web Services CloudFormation User Guide.
*/ export interface TagCostEstimationResourceCollectionFilter { /** @@ -2608,7 +2744,7 @@ export interface TagCostEstimationResourceCollectionFilter { /** *The values in an Amazon Web Services tag collection.
- *The tag's value is an optional field used to associate a string with + *
The tag's value is an optional field used to associate a string with
* the tag key (for example, 111122223333
, Production
, or a team
* name). The key and value are the tag's key pair.
* Omitting the tag value is the same as using an empty
@@ -2642,9 +2778,9 @@ export interface CostEstimationResourceCollectionFilter {
CloudFormation?: CloudFormationCostEstimationResourceCollectionFilter;
/**
- *
The Amazon Web Services tags used to filter the resource collection that is used for - * a cost estimate.
- *Tags help you identify and organize your Amazon Web Services resources. Many Amazon Web Services services support + *
The Amazon Web Services tags used to filter the resource collection that is used for a cost + * estimate.
+ *Tags help you identify and organize your Amazon Web Services resources. Many Amazon Web Services services support * tagging, so you can assign the same tag to resources from different services to indicate * that the resources are related. For example, you can assign the same tag to an Amazon DynamoDB * table resource that you assign to an Lambda function. For more information about @@ -2813,8 +2949,8 @@ export namespace CloudFormationCollectionFilter { } /** - *
A collection of Amazon Web Services tags used to filter insights. This is used to return insights generated from - * only resources that contain the tags in the tag collection.
+ *A collection of Amazon Web Services tags used to filter insights. This is used to return insights + * generated from only resources that contain the tags in the tag collection.
*/ export interface TagCollectionFilter { /** @@ -2837,7 +2973,7 @@ export interface TagCollectionFilter { /** *The values in an Amazon Web Services tag collection.
- *The tag's value is an optional field used to associate a string with + *
The tag's value is an optional field used to associate a string with
* the tag key (for example, 111122223333
, Production
, or a team
* name). The key and value are the tag's key pair.
* Omitting the tag value is the same as using an empty
@@ -2871,7 +3007,7 @@ export interface ResourceCollectionFilter {
/**
*
The Amazon Web Services tags used to filter the resources in the resource collection.
- *Tags help you identify and organize your Amazon Web Services resources. Many Amazon Web Services services support + *
Tags help you identify and organize your Amazon Web Services resources. Many Amazon Web Services services support * tagging, so you can assign the same tag to resources from different services to indicate * that the resources are related. For example, you can assign the same tag to an Amazon DynamoDB * table resource that you assign to an Lambda function. For more information about @@ -3080,6 +3216,16 @@ export interface ProactiveAnomalySummary { * threshold is related to the anomalous behavior that generated this anomaly.
*/ Limit?: number; + + /** + *Returns the metadata of the source.
+ */ + SourceMetadata?: AnomalySourceMetadata; + + /** + *Information about a resource in which DevOps Guru detected anomalous behavior.
+ */ + AnomalyResources?: AnomalyResource[]; } export namespace ProactiveAnomalySummary { @@ -3918,7 +4064,7 @@ export namespace ListOrganizationInsightsRequest { /** *Details about a proactive insight. This object is returned by
- * DescribeInsight
.
DescribeInsight
.
*/
export interface ProactiveOrganizationInsightSummary {
/**
@@ -3942,7 +4088,8 @@ export interface ProactiveOrganizationInsightSummary {
Name?: string;
/**
- * An array of severity values used to search for insights. For more information, see + *
An array of severity values used to search for insights. + * For more information, see * Understanding * insight severities in the Amazon DevOps Guru User Guide.
*/ @@ -3990,7 +4137,7 @@ export namespace ProactiveOrganizationInsightSummary { /** *Information about a reactive insight. This object is returned by
- * DescribeInsight
.
DescribeInsight
.
*/
export interface ReactiveOrganizationInsightSummary {
/**
@@ -4014,7 +4161,8 @@ export interface ReactiveOrganizationInsightSummary {
Name?: string;
/**
- * An array of severity values used to search for insights. For more information, see + *
An array of severity values used to search for insights. + * For more information, see * Understanding * insight severities in the Amazon DevOps Guru User Guide.
*/ @@ -4139,11 +4287,11 @@ export interface RecommendationRelatedAnomalyResource { Name?: string; /** - * The type of the resource. Resource types take the same form that is
- * used by Amazon Web Services CloudFormation resource type identifiers, service-provider::service-name::data-type-name
.
- * For example, AWS::RDS::DBCluster
. For more information, see
- * Amazon Web Services resource and
- * property types reference in the Amazon Web Services CloudFormation User Guide.
The type of the resource. Resource types take the same form that is used by Amazon Web Services CloudFormation
+ * resource type identifiers, service-provider::service-name::data-type-name
.
+ * For example, AWS::RDS::DBCluster
. For more information, see Amazon Web Services
+ * resource and property types reference in the Amazon Web Services CloudFormation User
+ * Guide.
The category type of the recommendation.
+ */ + Category?: string; } export namespace Recommendation { @@ -4586,7 +4739,7 @@ export interface SearchOrganizationInsightsRequest { /** * The type of insights you are searching for (REACTIVE
or
- * PROACTIVE
).
PROACTIVE
).
*/
Type: InsightType | string | undefined;
}
@@ -4661,6 +4814,33 @@ export namespace StartCostEstimationResponse {
});
}
+export interface UpdateEventSourcesConfigRequest {
+ /**
+ * The name of the event source.
+ */ + EventSources?: EventSourcesConfig; +} + +export namespace UpdateEventSourcesConfigRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateEventSourcesConfigRequest): any => ({ + ...obj, + }); +} + +export interface UpdateEventSourcesConfigResponse {} + +export namespace UpdateEventSourcesConfigResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateEventSourcesConfigResponse): any => ({ + ...obj, + }); +} + export enum UpdateResourceCollectionAction { ADD = "ADD", REMOVE = "REMOVE", @@ -4689,7 +4869,7 @@ export namespace UpdateCloudFormationCollectionFilter { /** *A new collection of Amazon Web Services resources that are defined by an Amazon Web Services tag or tag - * key/value pair.
+ * key/value pair. */ export interface UpdateTagCollectionFilter { /** @@ -4712,7 +4892,7 @@ export interface UpdateTagCollectionFilter { /** *The values in an Amazon Web Services tag collection.
- *The tag's value is an optional field used to associate a string with + *
The tag's value is an optional field used to associate a string with
* the tag key (for example, 111122223333
, Production
, or a team
* name). The key and value are the tag's key pair.
* Omitting the tag value is the same as using an empty
@@ -4742,7 +4922,7 @@ export interface UpdateResourceCollectionFilter {
/**
*
The updated Amazon Web Services tags used to filter the resources in the resource collection.
- *Tags help you identify and organize your Amazon Web Services resources. Many Amazon Web Services services support + *
Tags help you identify and organize your Amazon Web Services resources. Many Amazon Web Services services support
* tagging, so you can assign the same tag to resources from different services to indicate
* that the resources are related. For example, you can assign the same tag to an Amazon DynamoDB
* table resource that you assign to an Lambda function. For more information about
diff --git a/clients/client-devops-guru/src/protocols/Aws_restJson1.ts b/clients/client-devops-guru/src/protocols/Aws_restJson1.ts
index 18571a6835c5..56b2edc2aca6 100644
--- a/clients/client-devops-guru/src/protocols/Aws_restJson1.ts
+++ b/clients/client-devops-guru/src/protocols/Aws_restJson1.ts
@@ -32,6 +32,10 @@ import {
DescribeAccountOverviewCommandOutput,
} from "../commands/DescribeAccountOverviewCommand";
import { DescribeAnomalyCommandInput, DescribeAnomalyCommandOutput } from "../commands/DescribeAnomalyCommand";
+import {
+ DescribeEventSourcesConfigCommandInput,
+ DescribeEventSourcesConfigCommandOutput,
+} from "../commands/DescribeEventSourcesConfigCommand";
import { DescribeFeedbackCommandInput, DescribeFeedbackCommandOutput } from "../commands/DescribeFeedbackCommand";
import { DescribeInsightCommandInput, DescribeInsightCommandOutput } from "../commands/DescribeInsightCommand";
import {
@@ -91,6 +95,10 @@ import {
StartCostEstimationCommandInput,
StartCostEstimationCommandOutput,
} from "../commands/StartCostEstimationCommand";
+import {
+ UpdateEventSourcesConfigCommandInput,
+ UpdateEventSourcesConfigCommandOutput,
+} from "../commands/UpdateEventSourcesConfigCommand";
import {
UpdateResourceCollectionCommandInput,
UpdateResourceCollectionCommandOutput,
@@ -104,9 +112,11 @@ import {
AccessDeniedException,
AccountHealth,
AccountInsightHealth,
+ AmazonCodeGuruProfilerIntegration,
AnomalyReportedTimeRange,
AnomalyResource,
AnomalySourceDetails,
+ AnomalySourceMetadata,
AnomalyTimeRange,
CloudFormationCollection,
CloudFormationCollectionFilter,
@@ -121,6 +131,7 @@ import {
EndTimeRange,
Event,
EventResource,
+ EventSourcesConfig,
EventTimeRange,
InsightFeedback,
InsightHealth,
@@ -296,6 +307,28 @@ export const serializeAws_restJson1DescribeAnomalyCommand = async (
});
};
+export const serializeAws_restJson1DescribeEventSourcesConfigCommand = async (
+ input: DescribeEventSourcesConfigCommandInput,
+ context: __SerdeContext
+): Promise<__HttpRequest> => {
+ const { hostname, protocol = "https", port, path: basePath } = await context.endpoint();
+ const headers: any = {
+ "content-type": "application/json",
+ };
+ const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/event-sources";
+ let body: any;
+ body = "";
+ return new __HttpRequest({
+ protocol,
+ hostname,
+ port,
+ method: "POST",
+ headers,
+ path: resolvedPath,
+ body,
+ });
+};
+
export const serializeAws_restJson1DescribeFeedbackCommand = async (
input: DescribeFeedbackCommandInput,
context: __SerdeContext
@@ -907,6 +940,33 @@ export const serializeAws_restJson1StartCostEstimationCommand = async (
});
};
+export const serializeAws_restJson1UpdateEventSourcesConfigCommand = async (
+ input: UpdateEventSourcesConfigCommandInput,
+ context: __SerdeContext
+): Promise<__HttpRequest> => {
+ const { hostname, protocol = "https", port, path: basePath } = await context.endpoint();
+ const headers: any = {
+ "content-type": "application/json",
+ };
+ const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/event-sources";
+ let body: any;
+ body = JSON.stringify({
+ ...(input.EventSources !== undefined &&
+ input.EventSources !== null && {
+ EventSources: serializeAws_restJson1EventSourcesConfig(input.EventSources, context),
+ }),
+ });
+ return new __HttpRequest({
+ protocol,
+ hostname,
+ port,
+ method: "PUT",
+ headers,
+ path: resolvedPath,
+ body,
+ });
+};
+
export const serializeAws_restJson1UpdateResourceCollectionCommand = async (
input: UpdateResourceCollectionCommandInput,
context: __SerdeContext
@@ -1210,6 +1270,59 @@ const deserializeAws_restJson1DescribeAnomalyCommandError = async (
}
};
+export const deserializeAws_restJson1DescribeEventSourcesConfigCommand = async (
+ output: __HttpResponse,
+ context: __SerdeContext
+): Promise The parameter accepts an integer, which Amazon EC2 interprets as a percentage. To turn off price protection, specify a high value, such as This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements. If you set Default: The parameter accepts an integer, which Amazon EC2 interprets as a percentage. To turn off price protection, specify a high value, such as This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements. If you set Default: 999999
.TargetCapacityUnitType
to vcpu
or
+ * memory-mib
, the price protection threshold is applied based on the
+ * per-vCPU or per-memory price instead of the per-instance price.100
* 999999
.TargetCapacityUnitType
to vcpu
or
+ * memory-mib
, the price protection threshold is applied based on the
+ * per-vCPU or per-memory price instead of the per-instance price.20
*
For more information, see EC2 Fleet
+ * For more information, see EC2 Fleet
* request types in the Amazon EC2 User Guide. The parameter accepts an integer, which Amazon EC2 interprets as a percentage. To turn off price protection, specify a high value, such as This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements. If you set Default: The parameter accepts an integer, which Amazon EC2 interprets as a percentage. To turn off price protection, specify a high value, such as This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements. If you set Default: The IP protocol assigned to this IPAM pool. You must choose either IPv4 or IPv6 protocol for a pool. If selected, IPAM will continuously look for resources within the CIDR range of this pool
diff --git a/clients/client-ec2/src/models/models_3.ts b/clients/client-ec2/src/models/models_3.ts
index 3666e68f0117..3d8cad6dfb5d 100644
--- a/clients/client-ec2/src/models/models_3.ts
+++ b/clients/client-ec2/src/models/models_3.ts
@@ -3704,6 +3704,7 @@ export type ImageAttributeName =
| "bootMode"
| "description"
| "kernel"
+ | "lastLaunchedTime"
| "launchPermission"
| "productCodes"
| "ramdisk"
@@ -3824,9 +3825,20 @@ export interface ImageAttribute {
SriovNetSupport?: AttributeValue;
/**
- * Describes a value for a resource attribute that is a String. The boot mode. The date and time, in ISO 8601 date-time
+ * format, when the AMI was last used to launch an EC2 instance. When the AMI is used,
+ * there is a 24-hour delay before that usage is reported.
+ * When an image is pushed to a repository, each image layer is checked to verify if it
* has been uploaded before. If it has been uploaded, then the image layer is
* skipped. This operation is used by the Amazon ECR proxy and is not generally used by
* customers for pulling and pushing images. In most cases, you should use the 999999
.TargetCapacityUnitType
to vcpu
or
+ * memory-mib
, the price protection threshold is applied based on the
+ * per-vCPU or per-memory price instead of the per-instance price.100
* 999999
.TargetCapacityUnitType
to vcpu
or
+ * memory-mib
, the price protection threshold is applied based on the
+ * per-vCPU or per-memory price instead of the per-instance price.20
* lastLaunchedTime
data is available starting April 2017.docker
CLI to pull, tag, and push images.
When an image is pushed, the CompleteLayerUpload API is called once per each new image * layer to verify that the upload has completed.
- *This operation is used by the Amazon ECR proxy and is not generally used by
* customers for pulling and pushing images. In most cases, you should use the docker
CLI to pull, tag, and push images.
When an image is pulled, the GetDownloadUrlForLayer API is called once per image layer * that is not already cached.
- *This operation is used by the Amazon ECR proxy and is not generally used by
* customers for pulling and pushing images. In most cases, you should use the docker
CLI to pull, tag, and push images.
When an image is pushed, the InitiateLayerUpload API is called once per image layer * that has not already been uploaded. Whether or not an image layer has been uploaded is * determined by the BatchCheckLayerAvailability API action.
- *This operation is used by the Amazon ECR proxy and is not generally used by
* customers for pulling and pushing images. In most cases, you should use the docker
CLI to pull, tag, and push images.
Updates the image scanning configuration for the specified repository.
+ *The PutImageScanningConfiguration
API is being deprecated, in favor
+ * of specifying the image scanning configuration at the registry level. For more
+ * information, see PutRegistryScanningConfiguration.
Updates the image scanning configuration for the specified repository.
*/ public putImageScanningConfiguration( args: PutImageScanningConfigurationCommandInput, @@ -1570,7 +1575,7 @@ export class ECR extends ECRClient { *When an image is pushed, each new image layer is uploaded in parts. The maximum size * of each image layer part can be 20971520 bytes (or about 20MB). The UploadLayerPart API * is called once per each new image layer part.
- *This operation is used by the Amazon ECR proxy and is not generally used by
* customers for pulling and pushing images. In most cases, you should use the docker
CLI to pull, tag, and push images.
When an image is pushed to a repository, each image layer is checked to verify if it * has been uploaded before. If it has been uploaded, then the image layer is * skipped.
- *This operation is used by the Amazon ECR proxy and is not generally used by
* customers for pulling and pushing images. In most cases, you should use the docker
CLI to pull, tag, and push images.
When an image is pushed, the CompleteLayerUpload API is called once per each new image * layer to verify that the upload has completed.
- *This operation is used by the Amazon ECR proxy and is not generally used by
* customers for pulling and pushing images. In most cases, you should use the docker
CLI to pull, tag, and push images.
When an image is pulled, the GetDownloadUrlForLayer API is called once per image layer * that is not already cached.
- *This operation is used by the Amazon ECR proxy and is not generally used by
* customers for pulling and pushing images. In most cases, you should use the docker
CLI to pull, tag, and push images.
When an image is pushed, the InitiateLayerUpload API is called once per image layer * that has not already been uploaded. Whether or not an image layer has been uploaded is * determined by the BatchCheckLayerAvailability API action.
- *This operation is used by the Amazon ECR proxy and is not generally used by
* customers for pulling and pushing images. In most cases, you should use the docker
CLI to pull, tag, and push images.
Updates the image scanning configuration for the specified repository.
+ *The PutImageScanningConfiguration
API is being deprecated, in favor
+ * of specifying the image scanning configuration at the registry level. For more
+ * information, see PutRegistryScanningConfiguration.
Updates the image scanning configuration for the specified repository.
* @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-ecr/src/commands/UploadLayerPartCommand.ts b/clients/client-ecr/src/commands/UploadLayerPartCommand.ts index 64cfb12a296c..c6fc403ac8ae 100644 --- a/clients/client-ecr/src/commands/UploadLayerPartCommand.ts +++ b/clients/client-ecr/src/commands/UploadLayerPartCommand.ts @@ -26,7 +26,7 @@ export interface UploadLayerPartCommandOutput extends UploadLayerPartResponse, _ *When an image is pushed, each new image layer is uploaded in parts. The maximum size * of each image layer part can be 20971520 bytes (or about 20MB). The UploadLayerPart API * is called once per each new image layer part.
- *This operation is used by the Amazon ECR proxy and is not generally used by
* customers for pulling and pushing images. In most cases, you should use the docker
CLI to pull, tag, and push images.
The metadata that you apply to a resource to help you categorize and organize them. - * Each tag consists of a key and an optional value, both of which you define. - * Tag keys can have a maximum character length of 128 characters, and tag values can have + *
The metadata to apply to a resource to help you categorize and organize them. Each tag + * consists of a key and a value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have * a maximum length of 256 characters.
*/ export interface Tag { @@ -964,8 +963,7 @@ export interface Tag { Key?: string; /** - *The optional part of a key-value pair that make up a tag. A value
acts as
- * a descriptor within a tag category (key).
A value
acts as a descriptor within a tag category (key).
The artifact media type of the image.
*/ artifactMediaType?: string; + + /** + *The date and time, expressed in standard JavaScript date format, when Amazon ECR recorded + * the last image pull.
+ *Amazon ECR refreshes the last image pull timestamp at least once every 24 hours. For
+ * example, if you pull an image once a day then the lastRecordedPullTime
+ * timestamp will indicate the exact time that the image was last pulled. However, if
+ * you pull an image once an hour, because Amazon ECR refreshes the
+ * lastRecordedPullTime
timestamp at least once every 24 hours, the
+ * result may not be the exact time that the image was last pulled.
The frequency that scans are performed at for a private registry.
+ *The frequency that scans are performed at for a private registry. When the
+ * ENHANCED
scan type is specified, the supported scan frequencies are
+ * CONTINUOUS_SCAN
and SCAN_ON_PUSH
. When the
+ * BASIC
scan type is specified, the SCAN_ON_PUSH
and
+ * MANUAL
scan frequencies are supported.
The scanning type to set for the registry.
- *By default, the BASIC
scan type is used. When basic scanning is set, you
- * may specify filters to determine which individual repositories, or all repositories, are
- * scanned when new images are pushed. Alternatively, you can do manual scans of images
- * with basic scanning.
When the ENHANCED
scan type is set, Amazon Inspector provides automated, continuous
- * scanning of all repositories in your registry.
When a registry scanning configuration is not defined, by default the
+ * BASIC
scan type is used. When basic scanning is used, you may specify
+ * filters to determine which individual repositories, or all repositories, are scanned
+ * when new images are pushed to those repositories. Alternatively, you can do manual scans
+ * of images with basic scanning.
When the ENHANCED
scan type is set, Amazon Inspector provides automated
+ * vulnerability scanning. You may choose between continuous scanning or scan on push and
+ * you may specify filters to determine which individual repositories, or all repositories,
+ * are scanned.
Tasks for services that don't use a load balancer are considered healthy if they're in
* the RUNNING
state. Tasks for services that use a load balancer are
- * considered healthy if they're in the RUNNING
state and the container
- * instance that they're hosted on is reported as healthy by the load balancer.
RUNNING
state and are reported as healthy by the load balancer.
* There are two service scheduler strategies available:
*When awsvpcTrunking
is specified, the elastic network interface (ENI)
* limit for any new container instances that support the feature is changed. If
* awsvpcTrunking
is enabled, any new container instances that support the
@@ -2152,8 +2151,7 @@ export class ECS extends ECSClient {
* replacement tasks are considered healthy. Tasks for services that do not use a
* load balancer are considered healthy if they're in the RUNNING
* state. Tasks for services that use a load balancer are considered healthy if
- * they're in the RUNNING
state and the container instance they're
- * hosted on is reported as healthy by the load balancer.
RUNNING
state and are reported as healthy by the load balancer.</p>
* The maximumPercent
parameter represents an upper limit on the
@@ -2210,19 +2208,21 @@ export class ECS extends ECSClient {
* apply to your participation in this preview.
Modifies the parameters of a service.
- *For services using the rolling update (ECS
) deployment controller, the
- * desired count, deployment configuration, network configuration, task placement
- * constraints and strategies, or task definition used can be updated.
For services using the blue/green (CODE_DEPLOY
) deployment controller,
- * only the desired count, deployment configuration, task placement constraints and
- * strategies, and health check grace period can be updated using this API. If the network
- * configuration, platform version, or task definition need to be updated, a new CodeDeploy
- * deployment is created. For more information, see CreateDeployment in the CodeDeploy API Reference.
For services using an external deployment controller, you can update only the desired - * count, task placement constraints and strategies, and health check grace period using - * this API. If the launch type, load balancer, network configuration, platform version, or - * task definition need to be updated, create a new task set. For more information, see - * CreateTaskSet.
+ *For services using the rolling update (ECS
) you can update the desired count,
+ * the deployment configuration, the network configuration, load balancers, service
+ * registries, enable ECS managed tags option, propagate tags option, task placement
+ * constraints and strategies, and the task definition. When you update any of these
+ * parameters, Amazon ECS starts new tasks with the new configuration.
For services using the blue/green (CODE_DEPLOY
) deployment controller, only the
+ * desired count, deployment configuration, task placement constraints and strategies,
+ * enable ECS managed tags option, and propagate tags can be updated using this API. If the
+ * network configuration, platform version, task definition, or load balancer need to be
+ * updated, create a new CodeDeploy deployment. For more information, see CreateDeployment in the CodeDeploy API Reference.
For services using an external deployment controller, you can update only the desired count, + * task placement constraints and strategies, health check grace period, enable ECS managed + * tags option, and propagate tags option, using this API. If the launch type, load + * balancer, network configuration, platform version, or task definition need to be + * updated, create a new task set For more information, see CreateTaskSet.
*You can add to or subtract from the number of instantiations of a task definition in a
* service by specifying the cluster that the service is running in and a new
* desiredCount
parameter.
RUNNING
state. Tasks for services that use a load balancer are
- * considered healthy if they're in the RUNNING
state and the
- * container instance they're hosted on is reported as healthy by the load
- * balancer.
+ * considered healthy if they're in the RUNNING
state and are reported
+ * as healthy by the load balancer.
* The maximumPercent
parameter represents an upper limit on the
@@ -2297,6 +2296,7 @@ export class ECS extends ECSClient {
*
When the service scheduler stops running tasks, it attempts to maintain balance across * the Availability Zones in your cluster using the following logic:
*You must have a service-linked role when you update any of the following service properties. + * If you specified a custom IAM role when you created the service, Amazon ECS automatically + * replaces the roleARN associated with the service with the ARN of your service-linked + * role. For more information, see Service-linked + * roles in the Amazon Elastic Container Service Developer Guide.
+ *
+ * loadBalancers,
+ *
+ * serviceRegistries
+ *
Tasks for services that don't use a load balancer are considered healthy if they're in
* the RUNNING
state. Tasks for services that use a load balancer are
- * considered healthy if they're in the RUNNING
state and the container
- * instance that they're hosted on is reported as healthy by the load balancer.
RUNNING
state and are reported as healthy by the load balancer.
* There are two service scheduler strategies available:
*When awsvpcTrunking
is specified, the elastic network interface (ENI)
* limit for any new container instances that support the feature is changed. If
* awsvpcTrunking
is enabled, any new container instances that support the
diff --git a/clients/client-ecs/src/commands/UpdateContainerInstancesStateCommand.ts b/clients/client-ecs/src/commands/UpdateContainerInstancesStateCommand.ts
index 7a4128678453..b6ee6bc51b9a 100644
--- a/clients/client-ecs/src/commands/UpdateContainerInstancesStateCommand.ts
+++ b/clients/client-ecs/src/commands/UpdateContainerInstancesStateCommand.ts
@@ -53,8 +53,7 @@ export interface UpdateContainerInstancesStateCommandOutput
* replacement tasks are considered healthy. Tasks for services that do not use a
* load balancer are considered healthy if they're in the RUNNING
* state. Tasks for services that use a load balancer are considered healthy if
- * they're in the RUNNING
state and the container instance they're
- * hosted on is reported as healthy by the load balancer.
RUNNING
state and are reported as healthy by the load balancer.</p>
* The maximumPercent
parameter represents an upper limit on the
diff --git a/clients/client-ecs/src/commands/UpdateServiceCommand.ts b/clients/client-ecs/src/commands/UpdateServiceCommand.ts
index abfdbc1d1d83..459264fd5be2 100644
--- a/clients/client-ecs/src/commands/UpdateServiceCommand.ts
+++ b/clients/client-ecs/src/commands/UpdateServiceCommand.ts
@@ -29,19 +29,21 @@ export interface UpdateServiceCommandOutput extends UpdateServiceResponse, __Met
* apply to your participation in this preview.
Modifies the parameters of a service.
- *For services using the rolling update (ECS
) deployment controller, the
- * desired count, deployment configuration, network configuration, task placement
- * constraints and strategies, or task definition used can be updated.
For services using the blue/green (CODE_DEPLOY
) deployment controller,
- * only the desired count, deployment configuration, task placement constraints and
- * strategies, and health check grace period can be updated using this API. If the network
- * configuration, platform version, or task definition need to be updated, a new CodeDeploy
- * deployment is created. For more information, see CreateDeployment in the CodeDeploy API Reference.
For services using an external deployment controller, you can update only the desired - * count, task placement constraints and strategies, and health check grace period using - * this API. If the launch type, load balancer, network configuration, platform version, or - * task definition need to be updated, create a new task set. For more information, see - * CreateTaskSet.
+ *For services using the rolling update (ECS
) you can update the desired count,
+ * the deployment configuration, the network configuration, load balancers, service
+ * registries, enable ECS managed tags option, propagate tags option, task placement
+ * constraints and strategies, and the task definition. When you update any of these
+ * parameters, Amazon ECS starts new tasks with the new configuration.
For services using the blue/green (CODE_DEPLOY
) deployment controller, only the
+ * desired count, deployment configuration, task placement constraints and strategies,
+ * enable ECS managed tags option, and propagate tags can be updated using this API. If the
+ * network configuration, platform version, task definition, or load balancer need to be
+ * updated, create a new CodeDeploy deployment. For more information, see CreateDeployment in the CodeDeploy API Reference.
For services using an external deployment controller, you can update only the desired count, + * task placement constraints and strategies, health check grace period, enable ECS managed + * tags option, and propagate tags option, using this API. If the launch type, load + * balancer, network configuration, platform version, or task definition need to be + * updated, create a new task set For more information, see CreateTaskSet.
*You can add to or subtract from the number of instantiations of a task definition in a
* service by specifying the cluster that the service is running in and a new
* desiredCount
parameter.
RUNNING
state. Tasks for services that use a load balancer are
- * considered healthy if they're in the RUNNING
state and the
- * container instance they're hosted on is reported as healthy by the load
- * balancer.
+ * considered healthy if they're in the RUNNING
state and are reported
+ * as healthy by the load balancer.
* The maximumPercent
parameter represents an upper limit on the
@@ -116,6 +117,7 @@ export interface UpdateServiceCommandOutput extends UpdateServiceResponse, __Met
*
When the service scheduler stops running tasks, it attempts to maintain balance across * the Availability Zones in your cluster using the following logic:
*You must have a service-linked role when you update any of the following service properties. + * If you specified a custom IAM role when you created the service, Amazon ECS automatically + * replaces the roleARN associated with the service with the ARN of your service-linked + * role. For more information, see Service-linked + * roles in the Amazon Elastic Container Service Developer Guide.
+ *
+ * loadBalancers,
+ *
+ * serviceRegistries
+ *
Determines whether to enable managed scaling for the capacity provider.
+ *Determines whether to use managed scaling for the capacity provider.
*/ status?: ManagedScalingStatus | string; @@ -502,7 +502,7 @@ export interface ExecuteCommandLogConfiguration { cloudWatchLogGroupName?: string; /** - *Determines whether to enable encryption on the CloudWatch logs. If not specified, + *
Determines whether to use encryption on the CloudWatch logs. If not specified, * encryption will be disabled.
*/ cloudWatchEncryptionEnabled?: boolean; @@ -682,7 +682,7 @@ export enum ClusterSettingName { } /** - *The settings to use when creating a cluster. This parameter is used to enable CloudWatch + *
The settings to use when creating a cluster. This parameter is used to turn on CloudWatch * Container Insights for a cluster.
*/ export interface ClusterSetting { @@ -757,7 +757,7 @@ export interface CreateClusterRequest { tags?: Tag[]; /** - *The setting to use when creating a cluster. This parameter is used to enable CloudWatch + *
The setting to use when creating a cluster. This parameter is used to turn on CloudWatch
* Container Insights for a cluster. If this value is specified, it overrides the
* containerInsights
value set with PutAccountSetting or
* PutAccountSettingDefault.
The deployment circuit breaker determines whether a * service deployment will fail if the service can't reach a steady state. If enabled, a * service deployment will transition to a failed state and stop launching new tasks. You - * can also enable Amazon ECS to roll back your service to the last completed deployment after a + * can also configure Amazon ECS to roll back your service to the last completed deployment after a * failure. For more information, see Rolling * update in the Amazon Elastic Container Service Developer Guide.
*/ export interface DeploymentCircuitBreaker { /** - *Determines whether to enable the deployment circuit breaker logic for the + *
Determines whether to use the deployment circuit breaker logic for the * service.
*/ enable: boolean | undefined; /** - *Determines whether to enable Amazon ECS to roll back the service if a service deployment + *
Determines whether to configure Amazon ECS to roll back the service if a service deployment * fails. If rollback is enabled, when a service deployment fails, the service is rolled * back to the last deployment that completed successfully.
*/ @@ -1283,6 +1283,14 @@ export enum LaunchType { *The load balancer configuration to use with a service or task set.
*For specific notes and restrictions regarding the use of load balancers with services * and task sets, see the CreateService and CreateTaskSet actions.
+ *When you add, update, or remove a load balancer configuration, Amazon ECS starts a new + * deployment with the updated Elastic Load Balancing configuration. This causes tasks to register to and + * deregister from load balancers.
+ *We recommend that you verify this on a test environment before you update the Elastic Load Balancing + * configuration.
+ *A service-linked role is required for services that use multiple target groups. For + * more information, see Service-linked + * roles in the Amazon Elastic Container Service Developer Guide.
*/ export interface LoadBalancer { /** @@ -1493,6 +1501,7 @@ export namespace PlacementStrategy { } export enum PropagateTags { + NONE = "NONE", SERVICE = "SERVICE", TASK_DEFINITION = "TASK_DEFINITION", } @@ -1504,6 +1513,11 @@ export enum SchedulingStrategy { /** *The details for the service registry.
+ *Each service may be associated with one service registry. Multiple service registries for + * each service are not supported.
+ *When you add, update, or remove the service registries configuration, Amazon ECS starts a + * new deployment. New tasks are registered and deregistered to the updated service + * registry configuration.
*/ export interface ServiceRegistry { /** @@ -1594,10 +1608,8 @@ export interface CreateServiceRequest { * also have up to two listeners: a required listener for production traffic and an * optional listener that you can use to perform validation tests with Lambda functions * before routing production traffic to it. - *After you create a service using the ECS
deployment controller, the load
- * balancer name or target group ARN, container name, and container port that's specified
- * in the service definition are immutable. If you use the CODE_DEPLOY
- * deployment controller, these values can be changed when updating the service.
If you use the CODE_DEPLOY
deployment controller, these values can be changed
+ * when updating the service.
For Application Load Balancers and Network Load Balancers, this object must contain the load balancer target group ARN,
* the container name, and the container port to access from the load balancer. The
* container name must be as it appears in a container definition. The load balancer name
@@ -1741,6 +1753,7 @@ export interface CreateServiceRequest {
* service is configured to use a load balancer. If your service has a load balancer
* defined and you don't specify a health check grace period value, the default value of
* 0
is used.
If you do not use an Elastic Load Balancing, we recommend that you use the startPeriod
in the task definition health check parameters. For more information, see Health check.
If your service's tasks take a while to start and respond to Elastic Load Balancing health checks, you * can specify a health check grace period of up to * 2,147,483,647 @@ -1828,7 +1841,7 @@ export interface CreateServiceRequest { tags?: Tag[]; /** - *
Specifies whether to enable Amazon ECS managed tags for the tasks within the service. For + *
Specifies whether to turn on Amazon ECS managed tags for the tasks within the service. For * more information, see Tagging Your Amazon ECS * Resources in the Amazon Elastic Container Service Developer Guide.
*/ @@ -2464,13 +2477,11 @@ export interface Service { * *
- * DAEMON
-The daemon scheduling strategy deploys exactly one
- * task on each active container
- * instance.
- * This taskmeets all of the task placement constraints that you
- * specify in your cluster. The service scheduler also evaluates the task placement
- * constraints for running tasks. It stop tasks that don't meet the placement
- * constraints.
DAEMON
-The daemon scheduling strategy deploys exactly one task on each
+ * active container instance. This task meets all of the task placement constraints
+ * that you specify in your cluster. The service scheduler also evaluates the task
+ * placement constraints for running tasks. It stops tasks that don't meet the
+ * placement constraints.
* Fargate tasks don't support the DAEMON
* scheduling strategy.
Determines whether to enable Amazon ECS managed tags for the tasks in the service. For more + *
Determines whether to use Amazon ECS managed tags for the tasks in the service. For more * information, see Tagging Your Amazon ECS * Resources in the Amazon Elastic Container Service Developer Guide.
*/ @@ -2935,8 +2946,8 @@ export enum TargetType { } /** - *An attribute is a name-value pair that's associated with an Amazon ECS object. Attributes - * enable you to extend the Amazon ECS data model by adding custom metadata to your resources. + *
An attribute is a name-value pair that's associated with an Amazon ECS object. Use attributes + * to extend the Amazon ECS data model by adding custom metadata to your resources. * For more information, see Attributes in the Amazon Elastic Container Service Developer Guide.
*/ export interface Attribute { @@ -3701,7 +3712,7 @@ export enum ContainerCondition { * multiple dependencies. When a dependency is defined for container startup, for container * shutdown it is reversed. *Your Amazon ECS container instances require at least version 1.26.0 of the container agent - * to enable container dependencies. However, we recommend using the latest container agent + * to use container dependencies. However, we recommend using the latest container agent * version. For information about checking your agent version and updating to the latest * version, see Updating the Amazon ECS * Container Agent in the Amazon Elastic Container Service Developer Guide. If you're using @@ -4286,6 +4297,7 @@ export interface Secret { /** *
The secret to expose to the container. The supported values are either the full ARN of * the Secrets Manager secret or the full ARN of the parameter in the SSM Parameter Store.
+ *For information about the required Identity and Access Management permissions, see Required IAM permissions for Amazon ECS secrets (for Secrets Manager) or Required IAM permissions for Amazon ECS secrets (for Systems Manager Parameter store) in the Amazon Elastic Container Service Developer Guide.
*If the SSM Parameter Store parameter exists in the same Region as the task * you're launching, then you can use either the full ARN or name of the parameter. If @@ -5015,7 +5027,7 @@ export interface ContainerDefinition { * multiple dependencies. When a dependency is defined for container startup, for container * shutdown it is reversed.
*For tasks using the EC2 launch type, the container instances require at - * least version 1.26.0 of the container agent to enable container dependencies. However, + * least version 1.26.0 of the container agent to turn on container dependencies. However, * we recommend using the latest container agent version. For information about checking * your agent version and updating to the latest version, see Updating the Amazon ECS * Container Agent in the Amazon Elastic Container Service Developer Guide. If you're using @@ -5059,7 +5071,7 @@ export interface ContainerDefinition { *
For tasks using the EC2 launch type, your container instances require at
- * least version 1.26.0
of the container agent to enable a container start
+ * least version 1.26.0
of the container agent to use a container start
* timeout value. However, we recommend using the latest container agent version. For
* information about checking your agent version and updating to the latest version, see
* Updating the Amazon ECS
@@ -5092,7 +5104,7 @@ export interface ContainerDefinition {
* stopTimeout
parameter or the ECS_CONTAINER_STOP_TIMEOUT
* agent configuration variable are set, then the default values of 30 seconds for Linux
* containers and 30 seconds on Windows containers are used. Your container instances
- * require at least version 1.26.0 of the container agent to enable a container stop
+ * require at least version 1.26.0 of the container agent to use a container stop
* timeout value. However, we recommend using the latest container agent version. For
* information about checking your agent version and updating to the latest version, see
* Updating the Amazon ECS
@@ -5394,16 +5406,9 @@ export namespace ContainerDefinition {
* tasks hosted on Fargate. For more information, see Fargate task
* storage in the Amazon ECS User Guide for Fargate.
This parameter is only supported for tasks hosted on Fargate using - * the following platform versions:
- *Linux platform version 1.4.0
or later.
Windows platform version 1.0.0
or later.
This parameter is only supported for tasks hosted on Fargate using Linux
+ * platform version 1.4.0
or later. This parameter is not supported for
+ * Windows containers on Fargate.
The configuration details for the App Mesh proxy.
*For tasks that use the EC2 launch type, the container instances require
* at least version 1.26.0 of the container agent and at least version 1.26.0-1 of the
- * ecs-init
package to enable a proxy configuration. If your container
+ * ecs-init
package to use a proxy configuration. If your container
* instances are launched from the Amazon ECS optimized AMI version 20190301
or
* later, then they contain the required versions of the container agent and
* ecs-init
. For more information, see Amazon ECS-optimized Linux AMI
@@ -5772,7 +5777,7 @@ export interface EFSVolumeConfiguration {
rootDirectory?: string;
/**
- *
Determines whether to enable encryption for Amazon EFS data in transit between the Amazon ECS + *
Determines whether to use encryption for Amazon EFS data in transit between the Amazon ECS
* host and the Amazon EFS server. Transit encryption must be enabled if Amazon EFS IAM authorization
* is used. If this parameter is omitted, the default value of DISABLED
is
* used. For more information, see Encrypting Data in Transit in
@@ -6227,7 +6232,7 @@ export interface TaskDefinition {
/**
*
The configuration details for the App Mesh proxy.
*Your Amazon ECS container instances require at least version 1.26.0 of the container agent
- * and at least version 1.26.0-1 of the ecs-init
package to enable a proxy
+ * and at least version 1.26.0-1 of the ecs-init
package to use a proxy
* configuration. If your container instances are launched from the Amazon ECS optimized AMI
* version 20190301
or later, they contain the required versions of the
* container agent and ecs-init
. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.
The stop code indicating why a task was stopped. The stoppedReason
might
* contain additional details.
The following are valid values:
+ *
+ * TaskFailedToStart
+ *
+ * EssentialContainerExited
+ *
+ * UserInitiated
+ *
+ * TerminationNotice
+ *
+ * ServiceSchedulerInitiated
+ *
+ * SpotInterruption
+ *
A URL - * back - * to managed agent on the container that the SSM Session Manager client + * to the managed agent on the container that the SSM Session Manager client * uses to send commands and receive output from the container.
*/ streamUrl?: string; @@ -9060,7 +9097,7 @@ export interface RegisterTaskDefinitionRequest { *The configuration details for the App Mesh proxy.
*For tasks hosted on Amazon EC2 instances, the container instances require at least version
* 1.26.0
of the container agent and at least version
- * 1.26.0-1
of the ecs-init
package to enable a proxy
+ * 1.26.0-1
of the ecs-init
package to use a proxy
* configuration. If your container instances are launched from the Amazon ECS-optimized
* AMI version 20190301
or later, then they contain the required versions of
* the container agent and ecs-init
. For more information, see Amazon ECS-optimized AMI versions in the
@@ -9178,14 +9215,14 @@ export interface RunTaskRequest {
count?: number;
/**
- *
Specifies whether to enable Amazon ECS managed tags for the task. For more information, see + *
Specifies whether to use Amazon ECS managed tags for the task. For more information, see * Tagging Your Amazon ECS * Resources in the Amazon Elastic Container Service Developer Guide.
*/ enableECSManagedTags?: boolean; /** - *Determines whether to enable the execute command functionality for the containers in + *
Determines whether to use the execute command functionality for the containers in
* this task. If true
, this enables execute command functionality on all
* containers in the task.
Specifies whether to enable Amazon ECS managed tags for the task. For more information, see + *
Specifies whether to use Amazon ECS managed tags for the task. For more information, see * Tagging Your Amazon ECS * Resources in the Amazon Elastic Container Service Developer Guide.
*/ @@ -10101,7 +10138,7 @@ export interface UpdateClusterSettingsRequest { cluster: string | undefined; /** - *The setting to use by default for a cluster. This parameter is used to enable CloudWatch + *
The setting to use by default for a cluster. This parameter is used to turn on CloudWatch
* Container Insights for a cluster. If this value is specified, it overrides the
* containerInsights
value set with PutAccountSetting or
* PutAccountSettingDefault.
null
when performing this action.
*/
enableExecuteCommand?: boolean;
+
+ /**
+ * Determines whether to turn on Amazon ECS managed tags for the tasks in the service. For more + * information, see Tagging Your Amazon ECS + * Resources in the Amazon Elastic Container Service Developer Guide.
+ *Only tasks launched after the update will reflect the update. To update the tags on
+ * all tasks, set forceNewDeployment
to true
, so that Amazon ECS
+ * starts new tasks with the updated tags.
A list of Elastic Load Balancing load balancer objects. It contains the load balancer name, the + * container name, and the container port to access from the load balancer. The container + * name is as it appears in a container definition.
+ *When you add, update, or remove a load balancer configuration, Amazon ECS starts new tasks with + * the updated Elastic Load Balancing configuration, and then stops the old tasks when the new tasks are + * running.
+ *You can remove existing loadBalancers
by passing an empty list.
Determines whether to propagate the tags from the task definition or the service to + * the task. If no value is specified, the tags aren't propagated.
+ *Only tasks launched after the update will reflect the update. To update the tags on
+ * all tasks, set forceNewDeployment
to true
, so that Amazon ECS
+ * starts new tasks with the updated tags.
The details for the service discovery registries to assign to this service. For more + * information, see Service + * Discovery.
+ *When you add, update, or remove the service registries configuration, Amazon ECS starts new tasks + * with the updated service registries configuration, and then stops the old tasks when the + * new tasks are running.
+ *You can remove existing serviceRegistries
by passing an empty
+ * list.
Details about the task set.
+ *etails about the task set.
*/ taskSet?: TaskSet; } diff --git a/clients/client-ecs/src/protocols/Aws_json1_1.ts b/clients/client-ecs/src/protocols/Aws_json1_1.ts index 01f55a6fb81a..ea9f0e0b365a 100644 --- a/clients/client-ecs/src/protocols/Aws_json1_1.ts +++ b/clients/client-ecs/src/protocols/Aws_json1_1.ts @@ -6203,6 +6203,8 @@ const serializeAws_json1_1UpdateServiceRequest = (input: UpdateServiceRequest, c deploymentConfiguration: serializeAws_json1_1DeploymentConfiguration(input.deploymentConfiguration, context), }), ...(input.desiredCount !== undefined && input.desiredCount !== null && { desiredCount: input.desiredCount }), + ...(input.enableECSManagedTags !== undefined && + input.enableECSManagedTags !== null && { enableECSManagedTags: input.enableECSManagedTags }), ...(input.enableExecuteCommand !== undefined && input.enableExecuteCommand !== null && { enableExecuteCommand: input.enableExecuteCommand }), ...(input.forceNewDeployment !== undefined && @@ -6211,6 +6213,10 @@ const serializeAws_json1_1UpdateServiceRequest = (input: UpdateServiceRequest, c input.healthCheckGracePeriodSeconds !== null && { healthCheckGracePeriodSeconds: input.healthCheckGracePeriodSeconds, }), + ...(input.loadBalancers !== undefined && + input.loadBalancers !== null && { + loadBalancers: serializeAws_json1_1LoadBalancers(input.loadBalancers, context), + }), ...(input.networkConfiguration !== undefined && input.networkConfiguration !== null && { networkConfiguration: serializeAws_json1_1NetworkConfiguration(input.networkConfiguration, context), @@ -6225,7 +6231,12 @@ const serializeAws_json1_1UpdateServiceRequest = (input: UpdateServiceRequest, c }), ...(input.platformVersion !== undefined && input.platformVersion !== null && { platformVersion: input.platformVersion }), + ...(input.propagateTags !== undefined && input.propagateTags !== null && { propagateTags: input.propagateTags }), ...(input.service !== undefined && input.service !== null && { service: input.service }), + ...(input.serviceRegistries !== 
undefined && + input.serviceRegistries !== null && { + serviceRegistries: serializeAws_json1_1ServiceRegistries(input.serviceRegistries, context), + }), ...(input.taskDefinition !== undefined && input.taskDefinition !== null && { taskDefinition: input.taskDefinition }), }; diff --git a/clients/client-eks/src/models/models_0.ts b/clients/client-eks/src/models/models_0.ts index a53cddc24e9a..4d84897cb7b8 100644 --- a/clients/client-eks/src/models/models_0.ts +++ b/clients/client-eks/src/models/models_0.ts @@ -2235,6 +2235,7 @@ export enum NodegroupIssueCode { EC2_SECURITY_GROUP_DELETION_FAILURE = "Ec2SecurityGroupDeletionFailure", EC2_SECURITY_GROUP_NOT_FOUND = "Ec2SecurityGroupNotFound", EC2_SUBNET_INVALID_CONFIGURATION = "Ec2SubnetInvalidConfiguration", + EC2_SUBNET_MISSING_IPV6_ASSIGNMENT = "Ec2SubnetMissingIpv6Assignment", EC2_SUBNET_NOT_FOUND = "Ec2SubnetNotFound", IAM_INSTANCE_PROFILE_NOT_FOUND = "IamInstanceProfileNotFound", IAM_LIMIT_EXCEEDED = "IamLimitExceeded", diff --git a/clients/client-elasticache/src/models/models_0.ts b/clients/client-elasticache/src/models/models_0.ts index 948e4ca0a4f0..56bdbf068cf5 100644 --- a/clients/client-elasticache/src/models/models_0.ts +++ b/clients/client-elasticache/src/models/models_0.ts @@ -2424,37 +2424,6 @@ export interface CreateCacheClusterMessage { * * *Memory optimized with data tiering:
- *Current generation:
- * - *- * R6gd node types (available only for Redis engine version 6.2 onward).
- * - * - * - * - *
- *
- * cache.r6gd.xlarge
,
- * cache.r6gd.2xlarge
,
- * cache.r6gd.4xlarge
,
- * cache.r6gd.8xlarge
,
- * cache.r6gd.12xlarge
,
- * cache.r6gd.16xlarge
- *
- *
- *
- *
- *
- *
- *
Memory optimized:
*Creates a group of permissions for various actions that a user can perform in FinSpace.
+ */ + public createPermissionGroup( + args: CreatePermissionGroupCommandInput, + options?: __HttpHandlerOptions + ): PromiseCreates a new user in FinSpace.
+ */ + public createUser(args: CreateUserCommandInput, options?: __HttpHandlerOptions): PromiseDeletes a FinSpace Dataset.
*/ @@ -196,6 +285,90 @@ export class FinspaceData extends FinspaceDataClient { } } + /** + *Deletes a permission group. This action is irreversible.
+ */ + public deletePermissionGroup( + args: DeletePermissionGroupCommandInput, + options?: __HttpHandlerOptions + ): PromiseDenies access to the FinSpace web application and API for the specified user.
+ */ + public disableUser(args: DisableUserCommandInput, options?: __HttpHandlerOptions): PromiseAllows the specified user to access the FinSpace web application and API.
+ */ + public enableUser(args: EnableUserCommandInput, options?: __HttpHandlerOptions): PromiseGet information about a Changeset.
*/ @@ -309,6 +482,32 @@ export class FinspaceData extends FinspaceDataClient { } } + /** + *Retrieves details for a specific user.
+ */ + public getUser(args: GetUserCommandInput, options?: __HttpHandlerOptions): PromiseA temporary Amazon S3 location, where you can copy your files from a source location to stage or use * as a scratch space in FinSpace notebook.
@@ -435,6 +634,96 @@ export class FinspaceData extends FinspaceDataClient { } } + /** + *Lists all available permission groups in FinSpace.
+ */ + public listPermissionGroups( + args: ListPermissionGroupsCommandInput, + options?: __HttpHandlerOptions + ): PromiseLists all available user accounts in FinSpace.
+ */ + public listUsers(args: ListUsersCommandInput, options?: __HttpHandlerOptions): PromiseResets the password for a specified user ID and generates a temporary one. Only a superuser can reset password for other users. Resetting the password immediately invalidates the previous password associated with the user.
+ */ + public resetUserPassword( + args: ResetUserPasswordCommandInput, + options?: __HttpHandlerOptions + ): PromiseUpdates a FinSpace Changeset.
*/ @@ -498,4 +787,62 @@ export class FinspaceData extends FinspaceDataClient { return this.send(command, optionsOrCb); } } + + /** + *Modifies the details of a permission group. You cannot modify a permissionGroupID
.
Modifies the details of the specified user account. You cannot update the userId
for a user.
Creates a group of permissions for various actions that a user can perform in FinSpace.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { FinspaceDataClient, CreatePermissionGroupCommand } from "@aws-sdk/client-finspace-data"; // ES Modules import + * // const { FinspaceDataClient, CreatePermissionGroupCommand } = require("@aws-sdk/client-finspace-data"); // CommonJS import + * const client = new FinspaceDataClient(config); + * const command = new CreatePermissionGroupCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CreatePermissionGroupCommandInput} for command's `input` shape. + * @see {@link CreatePermissionGroupCommandOutput} for command's `response` shape. + * @see {@link FinspaceDataClientResolvedConfig | config} for FinspaceDataClient's `config` shape. + * + */ +export class CreatePermissionGroupCommand extends $Command< + CreatePermissionGroupCommandInput, + CreatePermissionGroupCommandOutput, + FinspaceDataClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CreatePermissionGroupCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackCreates a new user in FinSpace.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { FinspaceDataClient, CreateUserCommand } from "@aws-sdk/client-finspace-data"; // ES Modules import + * // const { FinspaceDataClient, CreateUserCommand } = require("@aws-sdk/client-finspace-data"); // CommonJS import + * const client = new FinspaceDataClient(config); + * const command = new CreateUserCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CreateUserCommandInput} for command's `input` shape. + * @see {@link CreateUserCommandOutput} for command's `response` shape. + * @see {@link FinspaceDataClientResolvedConfig | config} for FinspaceDataClient's `config` shape. + * + */ +export class CreateUserCommand extends $Command< + CreateUserCommandInput, + CreateUserCommandOutput, + FinspaceDataClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CreateUserCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackDeletes a permission group. This action is irreversible.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { FinspaceDataClient, DeletePermissionGroupCommand } from "@aws-sdk/client-finspace-data"; // ES Modules import + * // const { FinspaceDataClient, DeletePermissionGroupCommand } = require("@aws-sdk/client-finspace-data"); // CommonJS import + * const client = new FinspaceDataClient(config); + * const command = new DeletePermissionGroupCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DeletePermissionGroupCommandInput} for command's `input` shape. + * @see {@link DeletePermissionGroupCommandOutput} for command's `response` shape. + * @see {@link FinspaceDataClientResolvedConfig | config} for FinspaceDataClient's `config` shape. + * + */ +export class DeletePermissionGroupCommand extends $Command< + DeletePermissionGroupCommandInput, + DeletePermissionGroupCommandOutput, + FinspaceDataClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DeletePermissionGroupCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackDenies access to the FinSpace web application and API for the specified user.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { FinspaceDataClient, DisableUserCommand } from "@aws-sdk/client-finspace-data"; // ES Modules import + * // const { FinspaceDataClient, DisableUserCommand } = require("@aws-sdk/client-finspace-data"); // CommonJS import + * const client = new FinspaceDataClient(config); + * const command = new DisableUserCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DisableUserCommandInput} for command's `input` shape. + * @see {@link DisableUserCommandOutput} for command's `response` shape. + * @see {@link FinspaceDataClientResolvedConfig | config} for FinspaceDataClient's `config` shape. + * + */ +export class DisableUserCommand extends $Command< + DisableUserCommandInput, + DisableUserCommandOutput, + FinspaceDataClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DisableUserCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackAllows the specified user to access the FinSpace web application and API.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { FinspaceDataClient, EnableUserCommand } from "@aws-sdk/client-finspace-data"; // ES Modules import + * // const { FinspaceDataClient, EnableUserCommand } = require("@aws-sdk/client-finspace-data"); // CommonJS import + * const client = new FinspaceDataClient(config); + * const command = new EnableUserCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link EnableUserCommandInput} for command's `input` shape. + * @see {@link EnableUserCommandOutput} for command's `response` shape. + * @see {@link FinspaceDataClientResolvedConfig | config} for FinspaceDataClient's `config` shape. + * + */ +export class EnableUserCommand extends $Command< + EnableUserCommandInput, + EnableUserCommandOutput, + FinspaceDataClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: EnableUserCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackRetrieves details for a specific user.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { FinspaceDataClient, GetUserCommand } from "@aws-sdk/client-finspace-data"; // ES Modules import + * // const { FinspaceDataClient, GetUserCommand } = require("@aws-sdk/client-finspace-data"); // CommonJS import + * const client = new FinspaceDataClient(config); + * const command = new GetUserCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetUserCommandInput} for command's `input` shape. + * @see {@link GetUserCommandOutput} for command's `response` shape. + * @see {@link FinspaceDataClientResolvedConfig | config} for FinspaceDataClient's `config` shape. + * + */ +export class GetUserCommand extends $Command< + GetUserCommandInput, + GetUserCommandOutput, + FinspaceDataClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetUserCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackLists all available permission groups in FinSpace.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { FinspaceDataClient, ListPermissionGroupsCommand } from "@aws-sdk/client-finspace-data"; // ES Modules import + * // const { FinspaceDataClient, ListPermissionGroupsCommand } = require("@aws-sdk/client-finspace-data"); // CommonJS import + * const client = new FinspaceDataClient(config); + * const command = new ListPermissionGroupsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListPermissionGroupsCommandInput} for command's `input` shape. + * @see {@link ListPermissionGroupsCommandOutput} for command's `response` shape. + * @see {@link FinspaceDataClientResolvedConfig | config} for FinspaceDataClient's `config` shape. + * + */ +export class ListPermissionGroupsCommand extends $Command< + ListPermissionGroupsCommandInput, + ListPermissionGroupsCommandOutput, + FinspaceDataClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListPermissionGroupsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackLists all available user accounts in FinSpace.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { FinspaceDataClient, ListUsersCommand } from "@aws-sdk/client-finspace-data"; // ES Modules import + * // const { FinspaceDataClient, ListUsersCommand } = require("@aws-sdk/client-finspace-data"); // CommonJS import + * const client = new FinspaceDataClient(config); + * const command = new ListUsersCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListUsersCommandInput} for command's `input` shape. + * @see {@link ListUsersCommandOutput} for command's `response` shape. + * @see {@link FinspaceDataClientResolvedConfig | config} for FinspaceDataClient's `config` shape. + * + */ +export class ListUsersCommand extends $Command< + ListUsersCommandInput, + ListUsersCommandOutput, + FinspaceDataClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListUsersCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackResets the password for a specified user ID and generates a temporary one. Only a superuser can reset password for other users. Resetting the password immediately invalidates the previous password associated with the user.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { FinspaceDataClient, ResetUserPasswordCommand } from "@aws-sdk/client-finspace-data"; // ES Modules import + * // const { FinspaceDataClient, ResetUserPasswordCommand } = require("@aws-sdk/client-finspace-data"); // CommonJS import + * const client = new FinspaceDataClient(config); + * const command = new ResetUserPasswordCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ResetUserPasswordCommandInput} for command's `input` shape. + * @see {@link ResetUserPasswordCommandOutput} for command's `response` shape. + * @see {@link FinspaceDataClientResolvedConfig | config} for FinspaceDataClient's `config` shape. + * + */ +export class ResetUserPasswordCommand extends $Command< + ResetUserPasswordCommandInput, + ResetUserPasswordCommandOutput, + FinspaceDataClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ResetUserPasswordCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackModifies the details of a permission group. You cannot modify a permissionGroupID
.
Modifies the details of the specified user account. You cannot update the userId
for a user.
The request conflicts with an existing resource.
*/ export class ConflictException extends __BaseException { readonly name: "ConflictException" = "ConflictException"; readonly $fault: "client" = "client"; + reason?: string; /** * @internal */ @@ -38,6 +54,7 @@ export class ConflictException extends __BaseException { ...opts, }); Object.setPrototypeOf(this, ConflictException.prototype); + this.reason = opts.reason; } } @@ -63,21 +80,21 @@ export interface CreateChangesetRequest { datasetId: string | undefined; /** - *Option to indicate how a Changeset will be applied to a Dataset.
+ *The option to indicate how a Changeset will be applied to a Dataset.
*
- * REPLACE
- Changeset will be considered as a replacement to all prior
+ * REPLACE
– Changeset will be considered as a replacement to all prior
* loaded Changesets.
- * APPEND
- Changeset will be considered as an addition to the end of all
+ * APPEND
– Changeset will be considered as an addition to the end of all
* prior loaded Changesets.
- * MODIFY
- Changeset is considered as a replacement to a specific prior ingested Changeset.
MODIFY
– Changeset is considered as a replacement to a specific prior ingested Changeset.
* The S3 path that you specify must allow the FinSpace role access. To do that, you first need to configure the IAM policy on S3 bucket. For more information, see Loading data from an Amazon S3 Bucket using the FinSpace APIsection.
+ *The S3 path that you specify must allow the FinSpace role access. To do that, you first need to configure the IAM policy on S3 bucket. For more information, see Loading data from an Amazon S3 Bucket using the FinSpace API section.
*/ sourceParams: { [key: string]: string } | undefined; @@ -109,19 +126,19 @@ export interface CreateChangesetRequest { *
- * PARQUET
- Parquet source file format.
PARQUET
– Parquet source file format.
*
- * CSV
- CSV source file format.
CSV
– CSV source file format.
*
- * JSON
- JSON source file format.
JSON
– JSON source file format.
*
- * XML
- XML source file format.
XML
– XML source file format.
* Name of the Dataset owner.
+ *The name of the Dataset owner.
*/ name?: string; @@ -311,6 +332,7 @@ export namespace DatasetOwnerInfo { */ export const filterSensitiveLog = (obj: DatasetOwnerInfo): any => ({ ...obj, + ...(obj.email && { email: SENSITIVE_STRING }), }); } @@ -351,7 +373,7 @@ export namespace DatasetOwnerInfo { * *For more information on the ataset permissions, see Supported Dataset Permissions in the FinSpace User Guide.
+ *For more information on the dataset permissions, see Supported Dataset Permissions in the FinSpace User Guide.
*/ export interface ResourcePermission { /** @@ -388,7 +410,7 @@ export namespace ResourcePermission { */ export interface PermissionGroupParams { /** - *The unique identifier of the PermissionGroup.
+ *The unique identifier for the PermissionGroup
.
- * STRING
- A String data type.
STRING
– A String data type.
*
- * CHAR
- A char data type.
CHAR
– A char data type.
*
- * INTEGER
- An integer data type.
INTEGER
– An integer data type.
*
- * TINYINT
- A tinyint data type.
TINYINT
– A tinyint data type.
*
- * SMALLINT
- A smallint data type.
SMALLINT
– A smallint data type.
*
- * BIGINT
- A bigint data type.
BIGINT
– A bigint data type.
*
- * FLOAT
- A float data type.
FLOAT
– A float data type.
*
- * DOUBLE
- A double data type.
DOUBLE
– A double data type.
*
- * DATE
- A date data type.
DATE
– A date data type.
*
- * DATETIME
- A datetime data type.
DATETIME
– A datetime data type.
*
- * BOOLEAN
- A boolean data type.
BOOLEAN
– A boolean data type.
*
- * BINARY
- A binary data type.
BINARY
– A binary data type.
* Name for a column.
+ *The name of a column.
*/ columnName?: string; @@ -541,11 +563,11 @@ export interface CreateDatasetRequest { *
- * TABULAR
- Data is structured in a tabular format.
TABULAR
– Data is structured in a tabular format.
*
- * NON_TABULAR
- Data is structured in a non-tabular format.
NON_TABULAR
– Data is structured in a non-tabular format.
*
- * GLUE_TABLE
- Glue table destination type.
GLUE_TABLE
– Glue table destination type.
*
- * S3
- S3 destination type.
S3
– S3 destination type.
*
- * PARQUET
- Parquet export file format.
PARQUET
– Parquet export file format.
*
- * DELIMITED_TEXT
- Delimited text export file format.
DELIMITED_TEXT
– Delimited text export file format.
* Beginning time to use for the Dataview. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
+ *Beginning time to use for the Dataview. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
*/ asOfTimestamp?: number; @@ -742,6 +765,175 @@ export namespace CreateDataViewResponse { }); } +export interface CreatePermissionGroupRequest { + /** + *The name of the permission group.
+ */ + name: string | undefined; + + /** + *A brief description for the permission group.
+ */ + description?: string; + + /** + *The option to indicate FinSpace application permissions that are granted to a specific group.
+ *
+ * CreateDataset
– Group members can create new datasets.
+ * ManageClusters
– Group members can manage Apache Spark clusters from FinSpace notebooks.
+ * ManageUsersAndGroups
– Group members can manage users and permission groups.
+ * ManageAttributeSets
– Group members can manage attribute sets.
+ * ViewAuditData
– Group members can view audit data.
+ * AccessNotebooks
– Group members will have access to FinSpace notebooks.
+ * GetTemporaryCredentials
– Group members can get temporary API credentials.
A token that ensures idempotency. This token expires in 10 minutes.
+ */ + clientToken?: string; +} + +export namespace CreatePermissionGroupRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreatePermissionGroupRequest): any => ({ + ...obj, + ...(obj.name && { name: SENSITIVE_STRING }), + ...(obj.description && { description: SENSITIVE_STRING }), + }); +} + +export interface CreatePermissionGroupResponse { + /** + *The unique identifier for the permission group.
+ */ + permissionGroupId?: string; +} + +export namespace CreatePermissionGroupResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreatePermissionGroupResponse): any => ({ + ...obj, + }); +} + +export enum UserType { + APP_USER = "APP_USER", + SUPER_USER = "SUPER_USER", +} + +export interface CreateUserRequest { + /** + *The email address of the user that you want to register. The email address serves as a uniquer identifier for each user and cannot be changed after it's created.
+ */ + emailAddress: string | undefined; + + /** + *The option to indicate the type of user. Use one of the following options to specify this parameter:
+ *
+ * SUPER_USER
– A user with permission to all the functionality and data in FinSpace.
+ * APP_USER
– A user with specific permissions in FinSpace. The users are assigned permissions by adding them to a permission group.
The first name of the user that you want to register.
+ */ + firstName?: string; + + /** + *The last name of the user that you want to register.
+ */ + lastName?: string; + + /** + *The option to indicate whether the user can use the GetProgrammaticAccessCredentials
API to obtain credentials that can then be used to access other FinSpace Data API operations.
+ * ENABLED
– The user has permissions to use the APIs.
+ * DISABLED
– The user does not have permissions to use any APIs.
The ARN identifier of an AWS user or role that is allowed to call the GetProgrammaticAccessCredentials
API to obtain a credentials token for a specific FinSpace user. This must be an IAM role within your FinSpace account.
A token that ensures idempotency. This token expires in 10 minutes.
+ */ + clientToken?: string; +} + +export namespace CreateUserRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateUserRequest): any => ({ + ...obj, + ...(obj.emailAddress && { emailAddress: SENSITIVE_STRING }), + ...(obj.firstName && { firstName: SENSITIVE_STRING }), + ...(obj.lastName && { lastName: SENSITIVE_STRING }), + }); +} + +export interface CreateUserResponse { + /** + *The unique identifier for the user.
+ */ + userId?: string; +} + +export namespace CreateUserResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateUserResponse): any => ({ + ...obj, + }); +} + /** * The request for a DeleteDataset operation. */ @@ -785,6 +977,117 @@ export namespace DeleteDatasetResponse { }); } +export interface DeletePermissionGroupRequest { + /** + *The unique identifier for the permission group that you want to delete.
+ */ + permissionGroupId: string | undefined; + + /** + *A token that ensures idempotency. This token expires in 10 minutes.
+ */ + clientToken?: string; +} + +export namespace DeletePermissionGroupRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeletePermissionGroupRequest): any => ({ + ...obj, + }); +} + +export interface DeletePermissionGroupResponse { + /** + *The unique identifier for the deleted permission group.
+ */ + permissionGroupId?: string; +} + +export namespace DeletePermissionGroupResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeletePermissionGroupResponse): any => ({ + ...obj, + }); +} + +export interface DisableUserRequest { + /** + *The unique identifier for the user account that you want to disable.
+ */ + userId: string | undefined; + + /** + *A token that ensures idempotency. This token expires in 10 minutes.
+ */ + clientToken?: string; +} + +export namespace DisableUserRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DisableUserRequest): any => ({ + ...obj, + }); +} + +export interface DisableUserResponse { + /** + *The unique identifier for the disabled user account.
+ */ + userId?: string; +} + +export namespace DisableUserResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DisableUserResponse): any => ({ + ...obj, + }); +} + +export interface EnableUserRequest { + /** + *The unique identifier for the user account that you want to enable.
+ */ + userId: string | undefined; + + /** + *A token that ensures idempotency. This token expires in 10 minutes.
+ */ + clientToken?: string; +} + +export namespace EnableUserRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: EnableUserRequest): any => ({ + ...obj, + }); +} + +export interface EnableUserResponse { + /** + *The unique identifier for the enabled user account.
+ */ + userId?: string; +} + +export namespace EnableUserResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: EnableUserResponse): any => ({ + ...obj, + }); +} + /** * Request to describe a changeset. */ @@ -834,40 +1137,40 @@ export interface ChangesetErrorInfo { *
- * VALIDATION
-The inputs to this request are invalid.
VALIDATION
– The inputs to this request are invalid.
*
- * SERVICE_QUOTA_EXCEEDED
- Service quotas have been exceeded. Please
+ * SERVICE_QUOTA_EXCEEDED
– Service quotas have been exceeded. Please
* contact AWS support to increase quotas.
- * ACCESS_DENIED
- Missing required permission to perform this
+ * ACCESS_DENIED
– Missing required permission to perform this
* request.
- * RESOURCE_NOT_FOUND
- One or more inputs to this request were not
+ * RESOURCE_NOT_FOUND
– One or more inputs to this request were not
* found.
- * THROTTLING
- The system temporarily lacks sufficient resources to process
+ * THROTTLING
– The system temporarily lacks sufficient resources to process
* the request.
- * INTERNAL_SERVICE_EXCEPTION
- An internal service error has
+ * INTERNAL_SERVICE_EXCEPTION
– An internal service error has
* occurred.
- * CANCELLED
- Cancelled.
CANCELLED
– Cancelled.
*
- * USER_RECOVERABLE
- A user recoverable error has occurred.
USER_RECOVERABLE
– A user recoverable error has occurred.
*
- * REPLACE
- Changeset is considered as a replacement to all prior loaded Changesets.
REPLACE
– Changeset is considered as a replacement to all prior loaded Changesets.
*
- * APPEND
- Changeset is considered as an addition to the end of all prior loaded Changesets.
APPEND
– Changeset is considered as an addition to the end of all prior loaded Changesets.
*
- * MODIFY
- Changeset is considered as a replacement to a specific prior ingested Changeset.
MODIFY
– Changeset is considered as a replacement to a specific prior ingested Changeset.
* The timestamp at which the Changeset was created in FinSpace. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
+ *The timestamp at which the Changeset was created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
*/ createTime?: number; @@ -955,12 +1258,12 @@ export interface GetChangesetResponse { errorInfo?: ChangesetErrorInfo; /** - *Time until which the Changeset is active. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
+ *Time until which the Changeset is active. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
*/ activeUntilTimestamp?: number; /** - *Beginning time from which the Changeset is active. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
+ *Beginning time from which the Changeset is active. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
*/ activeFromTimestamp?: number; @@ -1034,11 +1337,11 @@ export interface GetDatasetResponse { *
- * TABULAR
- Data is structured in a tabular format.
TABULAR
– Data is structured in a tabular format.
*
- * NON_TABULAR
- Data is structured in a non-tabular format.
NON_TABULAR
– Data is structured in a non-tabular format.
* The timestamp at which the Dataset was created in FinSpace. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
+ *The timestamp at which the Dataset was created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
*/ createTime?: number; /** - *The last time that the Dataset was modified. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
+ *The last time that the Dataset was modified. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
*/ lastModifiedTime?: number; @@ -1074,19 +1377,19 @@ export interface GetDatasetResponse { *
- * PENDING
- Dataset is pending creation.
PENDING
– Dataset is pending creation.
*
- * FAILED
- Dataset creation has failed.
FAILED
– Dataset creation has failed.
*
- * SUCCESS
- Dataset creation has succeeded.
SUCCESS
– Dataset creation has succeeded.
*
- * RUNNING
- Dataset creation is running.
RUNNING
– Dataset creation is running.
*
- * VALIDATION
-The inputs to this request are invalid.
VALIDATION
– The inputs to this request are invalid.
*
- * SERVICE_QUOTA_EXCEEDED
- Service quotas have been exceeded. Please
+ * SERVICE_QUOTA_EXCEEDED
– Service quotas have been exceeded. Please
* contact AWS support to increase quotas.
- * ACCESS_DENIED
- Missing required permission to perform this
+ * ACCESS_DENIED
– Missing required permission to perform this
* request.
- * RESOURCE_NOT_FOUND
- One or more inputs to this request were not
+ * RESOURCE_NOT_FOUND
– One or more inputs to this request were not
* found.
- * THROTTLING
- The system temporarily lacks sufficient resources to process
+ * THROTTLING
– The system temporarily lacks sufficient resources to process
* the request.
- * INTERNAL_SERVICE_EXCEPTION
- An internal service error has
+ * INTERNAL_SERVICE_EXCEPTION
– An internal service error has
* occurred.
- * CANCELLED
- Cancelled.
CANCELLED
– Cancelled.
*
- * USER_RECOVERABLE
- A user recoverable error has occurred.
USER_RECOVERABLE
– A user recoverable error has occurred.
* Time range to use for the Dataview. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
+ *Time range to use for the Dataview. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
*/ asOfTimestamp?: number; @@ -1230,12 +1533,12 @@ export interface GetDataViewResponse { errorInfo?: DataViewErrorInfo; /** - *The last time that a Dataview was modified. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
+ *The last time that a Dataview was modified. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
*/ lastModifiedTime?: number; /** - *The timestamp at which the Dataview was created in FinSpace. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
+ *The timestamp at which the Dataview was created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
*/ createTime?: number; @@ -1264,35 +1567,35 @@ export interface GetDataViewResponse { *
- * RUNNING
- Dataview creation is running.
RUNNING
– Dataview creation is running.
*
- * STARTING
- Dataview creation is starting.
STARTING
– Dataview creation is starting.
*
- * FAILED
- Dataview creation has failed.
FAILED
– Dataview creation has failed.
*
- * CANCELLED
- Dataview creation has been cancelled.
CANCELLED
– Dataview creation has been cancelled.
*
- * TIMEOUT
- Dataview creation has timed out.
TIMEOUT
– Dataview creation has timed out.
*
- * SUCCESS
- Dataview creation has succeeded.
SUCCESS
– Dataview creation has succeeded.
*
- * PENDING
- Dataview creation is pending.
PENDING
– Dataview creation is pending.
*
- * FAILED_CLEANUP_FAILED
- Dataview creation failed and resource cleanup failed.
FAILED_CLEANUP_FAILED
– Dataview creation failed and resource cleanup failed.
* The unique identifier of the user to get data for.
+ */ + userId: string | undefined; } -export interface GetWorkingLocationRequest { +export namespace GetUserRequest { /** - *Specify the type of the working location.
+ * @internal + */ + export const filterSensitiveLog = (obj: GetUserRequest): any => ({ + ...obj, + }); +} + +export enum UserStatus { + CREATING = "CREATING", + DISABLED = "DISABLED", + ENABLED = "ENABLED", +} + +export interface GetUserResponse { + /** + *The unique identifier for the user account that is retrieved.
+ */ + userId?: string; + + /** + *The current status of the user account.
*
- * SAGEMAKER
- Use the Amazon S3 location as a temporary location to store data content when
- * working with FinSpace Notebooks that run on SageMaker studio.
CREATING
– The user account creation is in progress.
*
- * INGESTION
- Use the Amazon S3 location as a staging location to copy your
- * data content and then use the location with the Changeset creation operation.
ENABLED
– The user account is created and is currently active.
+ *
+ * DISABLED
– The user account is currently inactive.
The first name of the user.
*/ - export const filterSensitiveLog = (obj: GetWorkingLocationRequest): any => ({ - ...obj, - }); -} + firstName?: string; -export interface GetWorkingLocationResponse { /** - *Returns the Amazon S3 URI for the working location.
+ *The last name of the user.
*/ - s3Uri?: string; + lastName?: string; /** - *Returns the Amazon S3 Path for the working location.
+ *The email address that is associated with the user.
*/ - s3Path?: string; + emailAddress?: string; /** - *Returns the Amazon S3 bucket name for the working location.
+ *Indicates the type of user.
+ *
+ * SUPER_USER
– A user with permission to all the functionality and data in FinSpace.
+ * APP_USER
– A user with specific permissions in FinSpace. The users are assigned permissions by adding them to a permissions group.
Indicates whether the user can use the GetProgrammaticAccessCredentials
API to obtain credentials that can then be used to access other FinSpace Data API operations.
+ * ENABLED
– The user has permissions to use the APIs.
+ * DISABLED
– The user does not have permissions to use any APIs.
The ARN identifier of an AWS user or role that is allowed to call the GetProgrammaticAccessCredentials
API to obtain a credentials token for a specific FinSpace user. This must be an IAM role within your FinSpace account.
The timestamp at which the user account was created in FinSpace. The value is determined as epoch time in milliseconds.
+ */ + createTime?: number; + + /** + *Describes the last time the user account was enabled. The value is determined as epoch time in milliseconds.
+ */ + lastEnabledTime?: number; + + /** + *Describes the last time the user account was disabled. The value is determined as epoch time in milliseconds.
+ */ + lastDisabledTime?: number; + + /** + *Describes the last time the user account was updated. The value is determined as epoch time in milliseconds.
+ */ + lastModifiedTime?: number; + + /** + *Describes the last time that the user logged into their account. The value is determined as epoch time in milliseconds.
+ */ + lastLoginTime?: number; +} + +export namespace GetUserResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetUserResponse): any => ({ + ...obj, + ...(obj.firstName && { firstName: SENSITIVE_STRING }), + ...(obj.lastName && { lastName: SENSITIVE_STRING }), + ...(obj.emailAddress && { emailAddress: SENSITIVE_STRING }), + }); +} + +export enum LocationType { + INGESTION = "INGESTION", + SAGEMAKER = "SAGEMAKER", +} + +export interface GetWorkingLocationRequest { + /** + *Specify the type of the working location.
+ *
+ * SAGEMAKER
– Use the Amazon S3 location as a temporary location to store data content when
+ * working with FinSpace Notebooks that run on SageMaker studio.
+ * INGESTION
– Use the Amazon S3 location as a staging location to copy your
+ * data content and then use the location with the Changeset creation operation.
Returns the Amazon S3 URI for the working location.
+ */ + s3Uri?: string; + + /** + *Returns the Amazon S3 Path for the working location.
+ */ + s3Path?: string; + + /** + *Returns the Amazon S3 bucket name for the working location.
+ */ + s3Bucket?: string; +} + +export namespace GetWorkingLocationResponse { + /** + * @internal */ export const filterSensitiveLog = (obj: GetWorkingLocationResponse): any => ({ ...obj, @@ -1459,7 +1899,7 @@ export interface ListChangesetsRequest { maxResults?: number; /** - *A token indicating where a results page should begin.
+ *A token that indicates where a results page should begin.
*/ nextToken?: string; } @@ -1497,17 +1937,17 @@ export interface ChangesetSummary { *
- * REPLACE
- Changeset is considered as a replacement to all prior loaded
+ * REPLACE
– Changeset is considered as a replacement to all prior loaded
* Changesets.
- * APPEND
- Changeset is considered as an addition to the end of all prior
+ * APPEND
– Changeset is considered as an addition to the end of all prior
* loaded Changesets.
- * MODIFY
- Changeset is considered as a replacement to a specific prior
+ * MODIFY
– Changeset is considered as a replacement to a specific prior
* ingested Changeset.
The timestamp at which the Changeset was created in FinSpace. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
+ *The timestamp at which the Changeset was created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
*/ createTime?: number; @@ -1534,23 +1974,23 @@ export interface ChangesetSummary { *
- * PENDING
- Changeset is pending creation.
PENDING
– Changeset is pending creation.
*
- * FAILED
- Changeset creation has failed.
FAILED
– Changeset creation has failed.
*
- * SUCCESS
- Changeset creation has succeeded.
SUCCESS
– Changeset creation has succeeded.
*
- * RUNNING
- Changeset creation is running.
RUNNING
– Changeset creation is running.
*
- * STOP_REQUESTED
- User requested Changeset creation to stop.
STOP_REQUESTED
– User requested Changeset creation to stop.
* Time until which the Changeset is active. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
+ *Time until which the Changeset is active. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
*/ activeUntilTimestamp?: number; /** - *Beginning time from which the Changeset is active. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
+ *Beginning time from which the Changeset is active. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
*/ activeFromTimestamp?: number; @@ -1601,7 +2041,7 @@ export interface ListChangesetsResponse { changesets?: ChangesetSummary[]; /** - *A token indicating where a results page should begin.
+ *A token that indicates where a results page should begin.
*/ nextToken?: string; } @@ -1620,7 +2060,7 @@ export namespace ListChangesetsResponse { */ export interface ListDatasetsRequest { /** - *A token indicating where a results page should begin.
+ *A token that indicates where a results page should begin.
*/ nextToken?: string; @@ -1663,11 +2103,11 @@ export interface Dataset { *
- * TABULAR
- Data is structured in a tabular format.
TABULAR
– Data is structured in a tabular format.
*
- * NON_TABULAR
- Data is structured in a non-tabular format.
NON_TABULAR
– Data is structured in a non-tabular format.
* The timestamp at which the Dataset was created in FinSpace. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
+ *The timestamp at which the Dataset was created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
*/ createTime?: number; /** - *The last time that the Dataset was modified. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
+ *The last time that the Dataset was modified. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
*/ lastModifiedTime?: number; @@ -1710,6 +2150,7 @@ export namespace Dataset { */ export const filterSensitiveLog = (obj: Dataset): any => ({ ...obj, + ...(obj.ownerInfo && { ownerInfo: DatasetOwnerInfo.filterSensitiveLog(obj.ownerInfo) }), }); } @@ -1723,7 +2164,7 @@ export interface ListDatasetsResponse { datasets?: Dataset[]; /** - *A token indicating where a results page should begin.
+ *A token that indicates where a results page should begin.
*/ nextToken?: string; } @@ -1734,6 +2175,7 @@ export namespace ListDatasetsResponse { */ export const filterSensitiveLog = (obj: ListDatasetsResponse): any => ({ ...obj, + ...(obj.datasets && { datasets: obj.datasets.map((item) => Dataset.filterSensitiveLog(item)) }), }); } @@ -1747,7 +2189,7 @@ export interface ListDataViewsRequest { datasetId: string | undefined; /** - *A token indicating where a results page should begin.
+ *A token that indicates where a results page should begin.
*/ nextToken?: string; @@ -1786,7 +2228,7 @@ export interface DataViewSummary { datasetId?: string; /** - *Time range to use for the Dataview. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
+ *Time range to use for the Dataview. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
*/ asOfTimestamp?: number; @@ -1805,35 +2247,35 @@ export interface DataViewSummary { *
- * RUNNING
- Dataview creation is running.
RUNNING
– Dataview creation is running.
*
- * STARTING
- Dataview creation is starting.
STARTING
– Dataview creation is starting.
*
- * FAILED
- Dataview creation has failed.
FAILED
– Dataview creation has failed.
*
- * CANCELLED
- Dataview creation has been cancelled.
CANCELLED
– Dataview creation has been cancelled.
*
- * TIMEOUT
- Dataview creation has timed out.
TIMEOUT
– Dataview creation has timed out.
*
- * SUCCESS
- Dataview creation has succeeded.
SUCCESS
– Dataview creation has succeeded.
*
- * PENDING
- Dataview creation is pending.
PENDING
– Dataview creation is pending.
*
- * FAILED_CLEANUP_FAILED
- Dataview creation failed and resource cleanup failed.
FAILED_CLEANUP_FAILED
– Dataview creation failed and resource cleanup failed.
* The timestamp at which the Dataview was created in FinSpace. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
+ *The timestamp at which the Dataview was created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
*/ createTime?: number; /** - *The last time that a Dataview was modified. The value is determined as Epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
+ *The last time that a Dataview was modified. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
*/ lastModifiedTime?: number; } @@ -1876,7 +2318,7 @@ export namespace DataViewSummary { export interface ListDataViewsResponse { /** - *A token indicating where a results page should begin.
+ *A token that indicates where a results page should begin.
*/ nextToken?: string; @@ -1895,6 +2337,334 @@ export namespace ListDataViewsResponse { }); } +export interface ListPermissionGroupsRequest { + /** + *A token that indicates where a results page should begin.
+ */ + nextToken?: string; + + /** + *The maximum number of results per page.
+ */ + maxResults: number | undefined; +} + +export namespace ListPermissionGroupsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListPermissionGroupsRequest): any => ({ + ...obj, + }); +} + +/** + *The structure for a permission group.
+ */ +export interface PermissionGroup { + /** + *The unique identifier for the permission group.
+ */ + permissionGroupId?: string; + + /** + *The name of the permission group.
+ */ + name?: string; + + /** + *A brief description for the permission group.
+ */ + description?: string; + + /** + *Indicates the permissions that are granted to a specific group for accessing the FinSpace application.
+ *
+ * CreateDataset
– Group members can create new datasets.
+ * ManageClusters
– Group members can manage Apache Spark clusters from FinSpace notebooks.
+ * ManageUsersAndGroups
– Group members can manage users and permission groups.
+ * ManageAttributeSets
– Group members can manage attribute sets.
+ * ViewAuditData
– Group members can view audit data.
+ * AccessNotebooks
– Group members will have access to FinSpace notebooks.
+ * GetTemporaryCredentials
– Group members can get temporary API credentials.
The timestamp at which the group was created in FinSpace. The value is determined as epoch time in milliseconds. + *
+ */ + createTime?: number; + + /** + *Describes the last time the permission group was updated. The value is determined as epoch time in milliseconds. + *
+ */ + lastModifiedTime?: number; +} + +export namespace PermissionGroup { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PermissionGroup): any => ({ + ...obj, + ...(obj.name && { name: SENSITIVE_STRING }), + ...(obj.description && { description: SENSITIVE_STRING }), + }); +} + +export interface ListPermissionGroupsResponse { + /** + *A list of all the permission groups.
+ */ + permissionGroups?: PermissionGroup[]; + + /** + *A token that indicates where a results page should begin.
+ */ + nextToken?: string; +} + +export namespace ListPermissionGroupsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListPermissionGroupsResponse): any => ({ + ...obj, + ...(obj.permissionGroups && { + permissionGroups: obj.permissionGroups.map((item) => PermissionGroup.filterSensitiveLog(item)), + }), + }); +} + +export interface ListUsersRequest { + /** + *A token that indicates where a results page should begin.
+ */ + nextToken?: string; + + /** + *The maximum number of results per page.
+ */ + maxResults: number | undefined; +} + +export namespace ListUsersRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListUsersRequest): any => ({ + ...obj, + }); +} + +/** + *The details of the user account.
+ */ +export interface User { + /** + *The unique identifier for the user.
+ */ + userId?: string; + + /** + *The current status of the user account.
+ *
+ * CREATING
– The user account creation is in progress.
+ * ENABLED
– The user account is created and is currently active.
+ * DISABLED
– The user account is currently inactive.
The first name of the user.
+ */ + firstName?: string; + + /** + *The last name of the user.
+ */ + lastName?: string; + + /** + *The email address of the user. The email address serves as a uniquer identifier for each user and cannot be changed after it's created.
+ */ + emailAddress?: string; + + /** + *Indicates the type of user.
+ *
+ * SUPER_USER
– A user with permission to all the functionality and data in FinSpace.
+ * APP_USER
– A user with specific permissions in FinSpace. The users are assigned permissions by adding them to a permissions group.
Indicates whether the user can use the GetProgrammaticAccessCredentials
API to obtain credentials that can then be used to access other FinSpace Data API operations.
+ * ENABLED
– The user has permissions to use the APIs.
+ * DISABLED
– The user does not have permissions to use any APIs.
The ARN identifier of an AWS user or role that is allowed to call the GetProgrammaticAccessCredentials
API to obtain a credentials token for a specific FinSpace user. This must be an IAM role within your FinSpace account.
The timestamp at which the user account was created in FinSpace. The value is determined as epoch time in milliseconds.
+ */ + createTime?: number; + + /** + *Describes the last time the user account was enabled. The value is determined as epoch time in milliseconds. + *
+ */ + lastEnabledTime?: number; + + /** + *Describes the last time the user account was disabled. The value is determined as epoch time in milliseconds.
+ */ + lastDisabledTime?: number; + + /** + *Describes the last time the user account was updated. The value is determined as epoch time in milliseconds. + *
+ */ + lastModifiedTime?: number; + + /** + *Describes the last time that the user logged into their account. The value is determined as epoch time in milliseconds. + *
+ */ + lastLoginTime?: number; +} + +export namespace User { + /** + * @internal + */ + export const filterSensitiveLog = (obj: User): any => ({ + ...obj, + ...(obj.firstName && { firstName: SENSITIVE_STRING }), + ...(obj.lastName && { lastName: SENSITIVE_STRING }), + ...(obj.emailAddress && { emailAddress: SENSITIVE_STRING }), + }); +} + +export interface ListUsersResponse { + /** + *A list of all the user accounts.
+ */ + users?: User[]; + + /** + *A token that indicates where a results page should begin.
+ */ + nextToken?: string; +} + +export namespace ListUsersResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListUsersResponse): any => ({ + ...obj, + ...(obj.users && { users: obj.users.map((item) => User.filterSensitiveLog(item)) }), + }); +} + +export interface ResetUserPasswordRequest { + /** + *The unique identifier of the user that a temporary password is requested for.
+ */ + userId: string | undefined; + + /** + *A token that ensures idempotency. This token expires in 10 minutes.
+ */ + clientToken?: string; +} + +export namespace ResetUserPasswordRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ResetUserPasswordRequest): any => ({ + ...obj, + }); +} + +export interface ResetUserPasswordResponse { + /** + *The unique identifier of the user that a new password is generated for.
+ */ + userId?: string; + + /** + *A randomly generated temporary password for the requested user account. This password expires in 7 days.
+ */ + temporaryPassword?: string; +} + +export namespace ResetUserPasswordResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ResetUserPasswordResponse): any => ({ + ...obj, + ...(obj.temporaryPassword && { temporaryPassword: SENSITIVE_STRING }), + }); +} + /** * Request to update an existing changeset. */ @@ -1940,19 +2710,19 @@ export interface UpdateChangesetRequest { *
- * PARQUET
- Parquet source file format.
PARQUET
– Parquet source file format.
*
- * CSV
- CSV source file format.
CSV
– CSV source file format.
*
- * JSON
- JSON source file format.
JSON
– JSON source file format.
*
- * XML
- XML source file format.
XML
– XML source file format.
*
- * TABULAR
- Data is structured in a tabular format.
TABULAR
– Data is structured in a tabular format.
*
- * NON_TABULAR
- Data is structured in a non-tabular format.
NON_TABULAR
– Data is structured in a non-tabular format.
* The unique identifier for the permission group to update.
+ */ + permissionGroupId: string | undefined; + + /** + *The name of the permission group.
+ */ + name?: string; + + /** + *A brief description for the permission group.
+ */ + description?: string; + + /** + *The permissions that are granted to a specific group for accessing the FinSpace application.
+ *
+ * CreateDataset
– Group members can create new datasets.
+ * ManageClusters
– Group members can manage Apache Spark clusters from FinSpace notebooks.
+ * ManageUsersAndGroups
– Group members can manage users and permission groups.
+ * ManageAttributeSets
– Group members can manage attribute sets.
+ * ViewAuditData
– Group members can view audit data.
+ * AccessNotebooks
– Group members will have access to FinSpace notebooks.
+ * GetTemporaryCredentials
– Group members can get temporary API credentials.
A token that ensures idempotency. This token expires in 10 minutes.
+ */ + clientToken?: string; +} + +export namespace UpdatePermissionGroupRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdatePermissionGroupRequest): any => ({ + ...obj, + ...(obj.name && { name: SENSITIVE_STRING }), + ...(obj.description && { description: SENSITIVE_STRING }), + }); +} + +export interface UpdatePermissionGroupResponse { + /** + *The unique identifier for the updated permission group.
+ */ + permissionGroupId?: string; +} + +export namespace UpdatePermissionGroupResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdatePermissionGroupResponse): any => ({ + ...obj, + }); +} + +export interface UpdateUserRequest { + /** + *The unique identifier for the user account to update.
+ */ + userId: string | undefined; + + /** + *The option to indicate the type of user.
+ *
+ * SUPER_USER
– A user with permission to all the functionality and data in FinSpace.
+ * APP_USER
– A user with specific permissions in FinSpace. The users are assigned permissions by adding them to a permissions group.
The first name of the user.
+ */ + firstName?: string; + + /** + *The last name of the user.
+ */ + lastName?: string; + + /** + *The option to indicate whether the user can use the GetProgrammaticAccessCredentials
API to obtain credentials that can then be used to access other FinSpace Data API operations.
+ * ENABLED
– The user has permissions to use the APIs.
+ * DISABLED
– The user does not have permissions to use any APIs.
The ARN identifier of an AWS user or role that is allowed to call the GetProgrammaticAccessCredentials
API to obtain a credentials token for a specific FinSpace user. This must be an IAM role within your FinSpace account.
A token that ensures idempotency. This token expires in 10 minutes.
+ */ + clientToken?: string; +} + +export namespace UpdateUserRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateUserRequest): any => ({ + ...obj, + ...(obj.firstName && { firstName: SENSITIVE_STRING }), + ...(obj.lastName && { lastName: SENSITIVE_STRING }), + }); +} + +export interface UpdateUserResponse { + /** + *The unique identifier of the updated user account.
+ */ + userId?: string; +} + +export namespace UpdateUserResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateUserResponse): any => ({ + ...obj, + }); +} diff --git a/clients/client-finspace-data/src/pagination/ListPermissionGroupsPaginator.ts b/clients/client-finspace-data/src/pagination/ListPermissionGroupsPaginator.ts new file mode 100644 index 000000000000..4b245d4dcd0c --- /dev/null +++ b/clients/client-finspace-data/src/pagination/ListPermissionGroupsPaginator.ts @@ -0,0 +1,59 @@ +import { Paginator } from "@aws-sdk/types"; + +import { + ListPermissionGroupsCommand, + ListPermissionGroupsCommandInput, + ListPermissionGroupsCommandOutput, +} from "../commands/ListPermissionGroupsCommand"; +import { FinspaceData } from "../FinspaceData"; +import { FinspaceDataClient } from "../FinspaceDataClient"; +import { FinspaceDataPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: FinspaceDataClient, + input: ListPermissionGroupsCommandInput, + ...args: any +): PromiseSpecifies the configuration for experiment logging to Amazon CloudWatch Logs.
+ */ +export interface ExperimentTemplateCloudWatchLogsLogConfigurationInput { + /** + *The Amazon Resource Name (ARN) of the destination Amazon CloudWatch Logs log group.
+ */ + logGroupArn: string | undefined; +} + +export namespace ExperimentTemplateCloudWatchLogsLogConfigurationInput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ExperimentTemplateCloudWatchLogsLogConfigurationInput): any => ({ + ...obj, + }); +} + +/** + *Specifies the configuration for experiment logging to Amazon S3.
+ */ +export interface ExperimentTemplateS3LogConfigurationInput { + /** + *The name of the destination bucket.
+ */ + bucketName: string | undefined; + + /** + *The bucket prefix.
+ */ + prefix?: string; +} + +export namespace ExperimentTemplateS3LogConfigurationInput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ExperimentTemplateS3LogConfigurationInput): any => ({ + ...obj, + }); +} + +/** + *Specifies the configuration for experiment logging.
+ */ +export interface CreateExperimentTemplateLogConfigurationInput { + /** + *The configuration for experiment logging to Amazon CloudWatch Logs.
+ */ + cloudWatchLogsConfiguration?: ExperimentTemplateCloudWatchLogsLogConfigurationInput; + + /** + *The configuration for experiment logging to Amazon S3.
+ */ + s3Configuration?: ExperimentTemplateS3LogConfigurationInput; + + /** + *The schema version.
+ */ + logSchemaVersion: number | undefined; +} + +export namespace CreateExperimentTemplateLogConfigurationInput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateExperimentTemplateLogConfigurationInput): any => ({ + ...obj, + }); +} + /** *Specifies a stop condition for an experiment template.
*/ @@ -328,6 +400,11 @@ export interface CreateExperimentTemplateRequest { *The tags to apply to the experiment template.
*/ tags?: { [key: string]: string }; + + /** + *The configuration for experiment logging.
+ */ + logConfiguration?: CreateExperimentTemplateLogConfigurationInput; } export namespace CreateExperimentTemplateRequest { @@ -378,6 +455,78 @@ export namespace ExperimentTemplateAction { }); } +/** + *Describes the configuration for experiment logging to Amazon CloudWatch Logs.
+ */ +export interface ExperimentTemplateCloudWatchLogsLogConfiguration { + /** + *The Amazon Resource Name (ARN) of the destination Amazon CloudWatch Logs log group.
+ */ + logGroupArn?: string; +} + +export namespace ExperimentTemplateCloudWatchLogsLogConfiguration { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ExperimentTemplateCloudWatchLogsLogConfiguration): any => ({ + ...obj, + }); +} + +/** + *Describes the configuration for experiment logging to Amazon S3.
+ */ +export interface ExperimentTemplateS3LogConfiguration { + /** + *The name of the destination bucket.
+ */ + bucketName?: string; + + /** + *The bucket prefix.
+ */ + prefix?: string; +} + +export namespace ExperimentTemplateS3LogConfiguration { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ExperimentTemplateS3LogConfiguration): any => ({ + ...obj, + }); +} + +/** + *Describes the configuration for experiment logging.
+ */ +export interface ExperimentTemplateLogConfiguration { + /** + *The configuration for experiment logging to Amazon CloudWatch Logs.
+ */ + cloudWatchLogsConfiguration?: ExperimentTemplateCloudWatchLogsLogConfiguration; + + /** + *The configuration for experiment logging to Amazon S3.
+ */ + s3Configuration?: ExperimentTemplateS3LogConfiguration; + + /** + *The schema version.
+ */ + logSchemaVersion?: number; +} + +export namespace ExperimentTemplateLogConfiguration { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ExperimentTemplateLogConfiguration): any => ({ + ...obj, + }); +} + /** *Describes a stop condition for an experiment template.
*/ @@ -518,6 +667,11 @@ export interface ExperimentTemplate { *The tags for the experiment template.
*/ tags?: { [key: string]: string }; + + /** + *The configuration for experiment logging.
+ */ + logConfiguration?: ExperimentTemplateLogConfiguration; } export namespace ExperimentTemplate { @@ -723,6 +877,78 @@ export namespace ExperimentAction { }); } +/** + *Describes the configuration for experiment logging to Amazon CloudWatch Logs.
+ */ +export interface ExperimentCloudWatchLogsLogConfiguration { + /** + *The Amazon Resource Name (ARN) of the destination Amazon CloudWatch Logs log group.
+ */ + logGroupArn?: string; +} + +export namespace ExperimentCloudWatchLogsLogConfiguration { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ExperimentCloudWatchLogsLogConfiguration): any => ({ + ...obj, + }); +} + +/** + *Describes the configuration for experiment logging to Amazon S3.
+ */ +export interface ExperimentS3LogConfiguration { + /** + *The name of the destination bucket.
+ */ + bucketName?: string; + + /** + *The bucket prefix.
+ */ + prefix?: string; +} + +export namespace ExperimentS3LogConfiguration { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ExperimentS3LogConfiguration): any => ({ + ...obj, + }); +} + +/** + *Describes the configuration for experiment logging.
+ */ +export interface ExperimentLogConfiguration { + /** + *The configuration for experiment logging to Amazon CloudWatch Logs.
+ */ + cloudWatchLogsConfiguration?: ExperimentCloudWatchLogsLogConfiguration; + + /** + *The configuration for experiment logging to Amazon S3.
+ */ + s3Configuration?: ExperimentS3LogConfiguration; + + /** + *The schema version.
+ */ + logSchemaVersion?: number; +} + +export namespace ExperimentLogConfiguration { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ExperimentLogConfiguration): any => ({ + ...obj, + }); +} + export enum ExperimentStatus { completed = "completed", failed = "failed", @@ -907,6 +1133,11 @@ export interface Experiment { *The tags for the experiment.
*/ tags?: { [key: string]: string }; + + /** + *The configuration for experiment logging.
+ */ + logConfiguration?: ExperimentLogConfiguration; } export namespace Experiment { @@ -1579,6 +1810,35 @@ export namespace UpdateExperimentTemplateActionInputItem { }); } +/** + *Specifies the configuration for experiment logging.
+ */ +export interface UpdateExperimentTemplateLogConfigurationInput { + /** + *The configuration for experiment logging to Amazon CloudWatch Logs.
+ */ + cloudWatchLogsConfiguration?: ExperimentTemplateCloudWatchLogsLogConfigurationInput; + + /** + *The configuration for experiment logging to Amazon S3.
+ */ + s3Configuration?: ExperimentTemplateS3LogConfigurationInput; + + /** + *The schema version.
+ */ + logSchemaVersion?: number; +} + +export namespace UpdateExperimentTemplateLogConfigurationInput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateExperimentTemplateLogConfigurationInput): any => ({ + ...obj, + }); +} + /** *Specifies a stop condition for an experiment. You can define a stop condition as a CloudWatch alarm.
*/ @@ -1679,6 +1939,11 @@ export interface UpdateExperimentTemplateRequest { *The Amazon Resource Name (ARN) of an IAM role that grants the FIS service permission to perform service actions on your behalf.
*/ roleArn?: string; + + /** + *The configuration for experiment logging.
+ */ + logConfiguration?: UpdateExperimentTemplateLogConfigurationInput; } export namespace UpdateExperimentTemplateRequest { diff --git a/clients/client-fis/src/protocols/Aws_restJson1.ts b/clients/client-fis/src/protocols/Aws_restJson1.ts index 487645ff9fb7..0900b791dbe5 100644 --- a/clients/client-fis/src/protocols/Aws_restJson1.ts +++ b/clients/client-fis/src/protocols/Aws_restJson1.ts @@ -2,6 +2,7 @@ import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@a import { decorateServiceException as __decorateServiceException, expectBoolean as __expectBoolean, + expectInt32 as __expectInt32, expectNonNull as __expectNonNull, expectNumber as __expectNumber, expectObject as __expectObject, @@ -64,11 +65,15 @@ import { ActionTarget, ConflictException, CreateExperimentTemplateActionInput, + CreateExperimentTemplateLogConfigurationInput, CreateExperimentTemplateStopConditionInput, CreateExperimentTemplateTargetInput, Experiment, ExperimentAction, ExperimentActionState, + ExperimentCloudWatchLogsLogConfiguration, + ExperimentLogConfiguration, + ExperimentS3LogConfiguration, ExperimentState, ExperimentStopCondition, ExperimentSummary, @@ -76,6 +81,11 @@ import { ExperimentTargetFilter, ExperimentTemplate, ExperimentTemplateAction, + ExperimentTemplateCloudWatchLogsLogConfiguration, + ExperimentTemplateCloudWatchLogsLogConfigurationInput, + ExperimentTemplateLogConfiguration, + ExperimentTemplateS3LogConfiguration, + ExperimentTemplateS3LogConfigurationInput, ExperimentTemplateStopCondition, ExperimentTemplateSummary, ExperimentTemplateTarget, @@ -87,6 +97,7 @@ import { TargetResourceTypeParameter, TargetResourceTypeSummary, UpdateExperimentTemplateActionInputItem, + UpdateExperimentTemplateLogConfigurationInput, UpdateExperimentTemplateStopConditionInput, UpdateExperimentTemplateTargetInput, ValidationException, @@ -109,6 +120,13 @@ export const serializeAws_restJson1CreateExperimentTemplateCommand = async ( }), clientToken: input.clientToken ?? 
generateIdempotencyToken(), ...(input.description !== undefined && input.description !== null && { description: input.description }), + ...(input.logConfiguration !== undefined && + input.logConfiguration !== null && { + logConfiguration: serializeAws_restJson1CreateExperimentTemplateLogConfigurationInput( + input.logConfiguration, + context + ), + }), ...(input.roleArn !== undefined && input.roleArn !== null && { roleArn: input.roleArn }), ...(input.stopConditions !== undefined && input.stopConditions !== null && { @@ -547,6 +565,13 @@ export const serializeAws_restJson1UpdateExperimentTemplateCommand = async ( actions: serializeAws_restJson1UpdateExperimentTemplateActionInputMap(input.actions, context), }), ...(input.description !== undefined && input.description !== null && { description: input.description }), + ...(input.logConfiguration !== undefined && + input.logConfiguration !== null && { + logConfiguration: serializeAws_restJson1UpdateExperimentTemplateLogConfigurationInput( + input.logConfiguration, + context + ), + }), ...(input.roleArn !== undefined && input.roleArn !== null && { roleArn: input.roleArn }), ...(input.stopConditions !== undefined && input.stopConditions !== null && { @@ -1423,6 +1448,30 @@ const serializeAws_restJson1CreateExperimentTemplateActionInputMap = ( }, {}); }; +const serializeAws_restJson1CreateExperimentTemplateLogConfigurationInput = ( + input: CreateExperimentTemplateLogConfigurationInput, + context: __SerdeContext +): any => { + return { + ...(input.cloudWatchLogsConfiguration !== undefined && + input.cloudWatchLogsConfiguration !== null && { + cloudWatchLogsConfiguration: serializeAws_restJson1ExperimentTemplateCloudWatchLogsLogConfigurationInput( + input.cloudWatchLogsConfiguration, + context + ), + }), + ...(input.logSchemaVersion !== undefined && + input.logSchemaVersion !== null && { logSchemaVersion: input.logSchemaVersion }), + ...(input.s3Configuration !== undefined && + input.s3Configuration !== null && { + 
s3Configuration: serializeAws_restJson1ExperimentTemplateS3LogConfigurationInput( + input.s3Configuration, + context + ), + }), + }; +}; + const serializeAws_restJson1CreateExperimentTemplateStopConditionInput = ( input: CreateExperimentTemplateStopConditionInput, context: __SerdeContext @@ -1530,6 +1579,25 @@ const serializeAws_restJson1ExperimentTemplateActionTargetMap = ( }, {}); }; +const serializeAws_restJson1ExperimentTemplateCloudWatchLogsLogConfigurationInput = ( + input: ExperimentTemplateCloudWatchLogsLogConfigurationInput, + context: __SerdeContext +): any => { + return { + ...(input.logGroupArn !== undefined && input.logGroupArn !== null && { logGroupArn: input.logGroupArn }), + }; +}; + +const serializeAws_restJson1ExperimentTemplateS3LogConfigurationInput = ( + input: ExperimentTemplateS3LogConfigurationInput, + context: __SerdeContext +): any => { + return { + ...(input.bucketName !== undefined && input.bucketName !== null && { bucketName: input.bucketName }), + ...(input.prefix !== undefined && input.prefix !== null && { prefix: input.prefix }), + }; +}; + const serializeAws_restJson1ExperimentTemplateTargetFilterInputList = ( input: ExperimentTemplateTargetInputFilter[], context: __SerdeContext @@ -1643,6 +1711,30 @@ const serializeAws_restJson1UpdateExperimentTemplateActionInputMap = ( }, {}); }; +const serializeAws_restJson1UpdateExperimentTemplateLogConfigurationInput = ( + input: UpdateExperimentTemplateLogConfigurationInput, + context: __SerdeContext +): any => { + return { + ...(input.cloudWatchLogsConfiguration !== undefined && + input.cloudWatchLogsConfiguration !== null && { + cloudWatchLogsConfiguration: serializeAws_restJson1ExperimentTemplateCloudWatchLogsLogConfigurationInput( + input.cloudWatchLogsConfiguration, + context + ), + }), + ...(input.logSchemaVersion !== undefined && + input.logSchemaVersion !== null && { logSchemaVersion: input.logSchemaVersion }), + ...(input.s3Configuration !== undefined && + input.s3Configuration !== 
null && { + s3Configuration: serializeAws_restJson1ExperimentTemplateS3LogConfigurationInput( + input.s3Configuration, + context + ), + }), + }; +}; + const serializeAws_restJson1UpdateExperimentTemplateStopConditionInput = ( input: UpdateExperimentTemplateStopConditionInput, context: __SerdeContext @@ -1811,6 +1903,10 @@ const deserializeAws_restJson1Experiment = (output: any, context: __SerdeContext : undefined, experimentTemplateId: __expectString(output.experimentTemplateId), id: __expectString(output.id), + logConfiguration: + output.logConfiguration !== undefined && output.logConfiguration !== null + ? deserializeAws_restJson1ExperimentLogConfiguration(output.logConfiguration, context) + : undefined, roleArn: __expectString(output.roleArn), startTime: output.startTime !== undefined && output.startTime !== null @@ -1930,6 +2026,42 @@ const deserializeAws_restJson1ExperimentActionTargetMap = ( }, {}); }; +const deserializeAws_restJson1ExperimentCloudWatchLogsLogConfiguration = ( + output: any, + context: __SerdeContext +): ExperimentCloudWatchLogsLogConfiguration => { + return { + logGroupArn: __expectString(output.logGroupArn), + } as any; +}; + +const deserializeAws_restJson1ExperimentLogConfiguration = ( + output: any, + context: __SerdeContext +): ExperimentLogConfiguration => { + return { + cloudWatchLogsConfiguration: + output.cloudWatchLogsConfiguration !== undefined && output.cloudWatchLogsConfiguration !== null + ? deserializeAws_restJson1ExperimentCloudWatchLogsLogConfiguration(output.cloudWatchLogsConfiguration, context) + : undefined, + logSchemaVersion: __expectInt32(output.logSchemaVersion), + s3Configuration: + output.s3Configuration !== undefined && output.s3Configuration !== null + ? 
deserializeAws_restJson1ExperimentS3LogConfiguration(output.s3Configuration, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1ExperimentS3LogConfiguration = ( + output: any, + context: __SerdeContext +): ExperimentS3LogConfiguration => { + return { + bucketName: __expectString(output.bucketName), + prefix: __expectString(output.prefix), + } as any; +}; + const deserializeAws_restJson1ExperimentState = (output: any, context: __SerdeContext): ExperimentState => { return { reason: __expectString(output.reason), @@ -2102,6 +2234,10 @@ const deserializeAws_restJson1ExperimentTemplate = (output: any, context: __Serd output.lastUpdateTime !== undefined && output.lastUpdateTime !== null ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.lastUpdateTime))) : undefined, + logConfiguration: + output.logConfiguration !== undefined && output.logConfiguration !== null + ? deserializeAws_restJson1ExperimentTemplateLogConfiguration(output.logConfiguration, context) + : undefined, roleArn: __expectString(output.roleArn), stopConditions: output.stopConditions !== undefined && output.stopConditions !== null @@ -2203,6 +2339,45 @@ const deserializeAws_restJson1ExperimentTemplateActionTargetMap = ( }, {}); }; +const deserializeAws_restJson1ExperimentTemplateCloudWatchLogsLogConfiguration = ( + output: any, + context: __SerdeContext +): ExperimentTemplateCloudWatchLogsLogConfiguration => { + return { + logGroupArn: __expectString(output.logGroupArn), + } as any; +}; + +const deserializeAws_restJson1ExperimentTemplateLogConfiguration = ( + output: any, + context: __SerdeContext +): ExperimentTemplateLogConfiguration => { + return { + cloudWatchLogsConfiguration: + output.cloudWatchLogsConfiguration !== undefined && output.cloudWatchLogsConfiguration !== null + ? 
deserializeAws_restJson1ExperimentTemplateCloudWatchLogsLogConfiguration( + output.cloudWatchLogsConfiguration, + context + ) + : undefined, + logSchemaVersion: __expectInt32(output.logSchemaVersion), + s3Configuration: + output.s3Configuration !== undefined && output.s3Configuration !== null + ? deserializeAws_restJson1ExperimentTemplateS3LogConfiguration(output.s3Configuration, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1ExperimentTemplateS3LogConfiguration = ( + output: any, + context: __SerdeContext +): ExperimentTemplateS3LogConfiguration => { + return { + bucketName: __expectString(output.bucketName), + prefix: __expectString(output.prefix), + } as any; +}; + const deserializeAws_restJson1ExperimentTemplateStopCondition = ( output: any, context: __SerdeContext diff --git a/clients/client-fsx/src/FSx.ts b/clients/client-fsx/src/FSx.ts index 0e1e98178a2e..50f0c1e32de3 100644 --- a/clients/client-fsx/src/FSx.ts +++ b/clients/client-fsx/src/FSx.ts @@ -577,8 +577,8 @@ export class FSx extends FSxClient { * Server, or Amazon FSx for OpenZFS file system from an existing Amazon FSx backup. * *If a file system with the specified client request token exists and the parameters
- * match, this operation returns the description of the file system. If a client request
- * token with the specified by the file system exists and the parameters don't match, this
+ * match, this operation returns the description of the file system. If a file system
+ * with the specified client request token exists but the parameters don't match, this
* call returns IncompatibleParameterError
. If a file system with the
* specified client request token doesn't exist, this operation does the following:
Creates a snapshot of an existing Amazon FSx for OpenZFS file system. With + *
Creates a snapshot of an existing Amazon FSx for OpenZFS volume. With * snapshots, you can easily undo file changes and compare file versions by restoring the * volume to a previous version.
*If a snapshot with the specified client request token exists, and the parameters
@@ -649,7 +649,7 @@ export class FSx extends FSxClient {
* with the specified client request token exists, and the parameters don't match, this
* operation returns IncompatibleParameterError
. If a snapshot with the
* specified client request token doesn't exist, CreateSnapshot
does the
- * following:
Creates a new OpenZFS snapshot with an assigned ID, and an initial lifecycle @@ -668,7 +668,7 @@ export class FSx extends FSxClient { *
The CreateSnapshot
operation returns while the snapshot's lifecycle state
* is still CREATING
. You can check the snapshot creation status by calling
* the DescribeSnapshots operation, which returns the snapshot state along with
- * other information.
Creates an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS storage - * volume.
+ *Creates an FSx for ONTAP or Amazon FSx for OpenZFS storage volume.
*/ public createVolume( args: CreateVolumeCommandInput, @@ -927,7 +926,7 @@ export class FSx extends FSxClient { } /** - *Deletes the Amazon FSx snapshot. After deletion, the snapshot no longer + *
Deletes an Amazon FSx for OpenZFS snapshot. After deletion, the snapshot no longer * exists, and its data is gone. Deleting a snapshot doesn't affect snapshots stored in a * file system backup.
*The DeleteSnapshot
operation returns instantly. The snapshot appears with
@@ -1275,7 +1274,7 @@ export class FSx extends FSxClient {
}
/**
- *
Returns the description of specific Amazon FSx snapshots, if a + *
Returns the description of specific Amazon FSx for OpenZFS snapshots, if a
* SnapshotIds
value is provided. Otherwise, this operation returns all
* snapshots owned by your Amazon Web Services account in the Amazon Web Services Region of
* the endpoint that you're calling.
Updates the name of a snapshot.
+ *Updates the name of an Amazon FSx for OpenZFS snapshot.
*/ public updateSnapshot( args: UpdateSnapshotCommandInput, diff --git a/clients/client-fsx/src/commands/CreateFileSystemFromBackupCommand.ts b/clients/client-fsx/src/commands/CreateFileSystemFromBackupCommand.ts index cfee43a97b5c..10ea35d7fc18 100644 --- a/clients/client-fsx/src/commands/CreateFileSystemFromBackupCommand.ts +++ b/clients/client-fsx/src/commands/CreateFileSystemFromBackupCommand.ts @@ -26,8 +26,8 @@ export interface CreateFileSystemFromBackupCommandOutput extends CreateFileSyste * Server, or Amazon FSx for OpenZFS file system from an existing Amazon FSx backup. * *If a file system with the specified client request token exists and the parameters
- * match, this operation returns the description of the file system. If a client request
- * token with the specified by the file system exists and the parameters don't match, this
+ * match, this operation returns the description of the file system. If a file system
+ * with the specified client request token exists but the parameters don't match, this
* call returns IncompatibleParameterError
. If a file system with the
* specified client request token doesn't exist, this operation does the following:
Creates a snapshot of an existing Amazon FSx for OpenZFS file system. With + *
Creates a snapshot of an existing Amazon FSx for OpenZFS volume. With * snapshots, you can easily undo file changes and compare file versions by restoring the * volume to a previous version.
*If a snapshot with the specified client request token exists, and the parameters
@@ -30,7 +30,7 @@ export interface CreateSnapshotCommandOutput extends CreateSnapshotResponse, __M
* with the specified client request token exists, and the parameters don't match, this
* operation returns IncompatibleParameterError
. If a snapshot with the
* specified client request token doesn't exist, CreateSnapshot
does the
- * following:
Creates a new OpenZFS snapshot with an assigned ID, and an initial lifecycle @@ -49,7 +49,7 @@ export interface CreateSnapshotCommandOutput extends CreateSnapshotResponse, __M *
The CreateSnapshot
operation returns while the snapshot's lifecycle state
* is still CREATING
. You can check the snapshot creation status by calling
* the DescribeSnapshots operation, which returns the snapshot state along with
- * other information.
Creates an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS storage - * volume.
+ *Creates an FSx for ONTAP or Amazon FSx for OpenZFS storage volume.
* @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-fsx/src/commands/DeleteSnapshotCommand.ts b/clients/client-fsx/src/commands/DeleteSnapshotCommand.ts index 6622571da7c4..0c14b04c789a 100644 --- a/clients/client-fsx/src/commands/DeleteSnapshotCommand.ts +++ b/clients/client-fsx/src/commands/DeleteSnapshotCommand.ts @@ -22,7 +22,7 @@ export interface DeleteSnapshotCommandInput extends DeleteSnapshotRequest {} export interface DeleteSnapshotCommandOutput extends DeleteSnapshotResponse, __MetadataBearer {} /** - *Deletes the Amazon FSx snapshot. After deletion, the snapshot no longer + *
Deletes an Amazon FSx for OpenZFS snapshot. After deletion, the snapshot no longer * exists, and its data is gone. Deleting a snapshot doesn't affect snapshots stored in a * file system backup.
*The DeleteSnapshot
operation returns instantly. The snapshot appears with
diff --git a/clients/client-fsx/src/commands/DescribeSnapshotsCommand.ts b/clients/client-fsx/src/commands/DescribeSnapshotsCommand.ts
index 56aa6a9584ce..566576da509f 100644
--- a/clients/client-fsx/src/commands/DescribeSnapshotsCommand.ts
+++ b/clients/client-fsx/src/commands/DescribeSnapshotsCommand.ts
@@ -22,7 +22,7 @@ export interface DescribeSnapshotsCommandInput extends DescribeSnapshotsRequest
export interface DescribeSnapshotsCommandOutput extends DescribeSnapshotsResponse, __MetadataBearer {}
/**
- *
Returns the description of specific Amazon FSx snapshots, if a + *
Returns the description of specific Amazon FSx for OpenZFS snapshots, if a
* SnapshotIds
value is provided. Otherwise, this operation returns all
* snapshots owned by your Amazon Web Services account in the Amazon Web Services Region of
* the endpoint that you're calling.
Updates the name of a snapshot.
+ *Updates the name of an Amazon FSx for OpenZFS snapshot.
* @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-fsx/src/models/models_0.ts b/clients/client-fsx/src/models/models_0.ts index 348a99494160..da2af7fa3d92 100644 --- a/clients/client-fsx/src/models/models_0.ts +++ b/clients/client-fsx/src/models/models_0.ts @@ -642,6 +642,12 @@ export interface OntapFileSystemConfiguration { /** *The IP address range in which the endpoints to access your file system * are created.
+ *The Endpoint IP address range you select for your file system + * must exist outside the VPC's CIDR range and must be at least /30 or larger. + * If you do not specify this optional parameter, Amazon FSx will automatically + * select a CIDR block for you.
+ *The throughput of an Amazon FSx file system, measured in megabytes per second - * (MBps), in 2 to the nth increments, between 2^3 (8) and 2^11 (2048).
+ * (MBps). Valid values are 64, 128, 256, 512, 1024, 2048, 3072, or 4096 MB/s. */ ThroughputCapacity?: number; @@ -1182,16 +1188,6 @@ export enum SnapshotLifecycle { PENDING = "PENDING", } -export enum VolumeLifecycle { - AVAILABLE = "AVAILABLE", - CREATED = "CREATED", - CREATING = "CREATING", - DELETING = "DELETING", - FAILED = "FAILED", - MISCONFIGURED = "MISCONFIGURED", - PENDING = "PENDING", -} - /** *Describes why a resource lifecycle state changed.
*/ @@ -1211,6 +1207,16 @@ export namespace LifecycleTransitionReason { }); } +export enum VolumeLifecycle { + AVAILABLE = "AVAILABLE", + CREATED = "CREATED", + CREATING = "CREATING", + DELETING = "DELETING", + FAILED = "FAILED", + MISCONFIGURED = "MISCONFIGURED", + PENDING = "PENDING", +} + export enum FlexCacheEndpointType { CACHE = "CACHE", NONE = "NONE", @@ -1423,6 +1429,7 @@ export namespace OntapVolumeConfiguration { } export enum OpenZFSDataCompressionType { + LZ4 = "LZ4", NONE = "NONE", ZSTD = "ZSTD", } @@ -1435,7 +1442,7 @@ export interface OpenZFSClientConfiguration { /** *A value that specifies who can mount the file system. You can provide a wildcard
* character (*
), an IP address (0.0.0.0
), or a CIDR address
- * (192.0.2.0/24
. By default, Amazon FSx uses the wildcard
+ * (192.0.2.0/24
). By default, Amazon FSx uses the wildcard
* character when specifying the client.
- * crossmount
is used by default. If you don't specify
- * crossmount
when changing the client configuration, you won't be
+ * crossmnt
is used by default. If you don't specify
+ * crossmnt
when changing the client configuration, you won't be
* able to see or access snapshots in your file system's snapshot directory.
The Network File System NFS) configurations for mounting an Amazon FSx for + *
The Network File System (NFS) configurations for mounting an Amazon FSx for * OpenZFS file system.
*/ export interface OpenZFSNfsExport { @@ -1602,19 +1609,34 @@ export interface OpenZFSVolumeConfiguration { StorageCapacityQuotaGiB?: number; /** - *The method used to compress the data on the volume. Unless a compression type is
- * specified, volumes inherit the DataCompressionType
value of their parent
- * volume.
The record size of an OpenZFS volume, in kibibytes (KiB). Valid values are 4, 8, + * 16, 32, 64, 128, 256, 512, or 1024 KiB. The default is 128 KiB. + * Most workloads should use the default record size. For guidance on when + * to set a custom record size, see the + * Amazon FSx for OpenZFS User Guide.
+ */ + RecordSizeKiB?: number; + + /** + *Specifies the method used to compress the data on the volume. The compression
+ * type is NONE
by default.
- * NONE
- Doesn't compress the data on the volume.
NONE
- Doesn't compress the data on the volume.
+ * NONE
is the default.
*
* ZSTD
- Compresses the data in the volume using the Zstandard
- * (ZSTD) compression algorithm. This algorithm reduces the amount of space used on
- * your volume and has very little impact on compute resources.
+ * LZ4
- Compresses the data in the volume using the LZ4
+ * compression algorithm. Compared to Z-Standard, LZ4 is less compute-intensive
+ * and delivers higher write throughput speeds.
true
and you specify one or more tags, only the specified tags are
* copied to snapshots. If you specify one or more tags when creating the snapshot, no tags
- * are copied from the volume, regardless of this value.
+ * are copied from the volume, regardless of this value.
*/
CopyTagsToSnapshots?: boolean;
@@ -1642,13 +1664,13 @@ export interface OpenZFSVolumeConfiguration {
ReadOnly?: boolean;
/**
- * The configuration object for mounting a Network File System (NFS) file - * system.
+ *The configuration object for mounting a Network File System (NFS) + * file system.
*/ NfsExports?: OpenZFSNfsExport[]; /** - *An object specifying how much storage users or groups can use on the volume.
+ *An object specifying how much storage users or groups can use on the volume.
*/ UserAndGroupQuotas?: OpenZFSUserOrGroupQuota[]; } @@ -2451,7 +2473,7 @@ export namespace CreateBackupRequest { } /** - *No Amazon FSx for NetApp ONTAP volumes were found based upon the supplied parameters.
+ *No Amazon FSx volumes were found based upon the supplied parameters.
*/ export class VolumeNotFound extends __BaseException { readonly name: "VolumeNotFound" = "VolumeNotFound"; @@ -2528,6 +2550,11 @@ export interface CreateDataRepositoryAssociationRequest { *This path specifies where in your file system files will be exported * from or imported to. This file system directory can be linked to only one * Amazon S3 bucket, and no other S3 bucket can be linked to the directory.
+ *If you specify only a forward slash (/
) as the file system
+ * path, you can link only 1 data repository to the file system. You can only specify
+ * "/" as the file system path for the first data repository associated with a file system.
/ns1/ns2
.
* This path specifies where in your file system files will be exported * from or imported to. This file system directory can be linked to only one - * Amazon S3 bucket, and no other S3 bucket can be linked to the directory.
+ * Amazon S3 bucket, and no other S3 bucket can be linked to the directory. + *If you specify only a forward slash (/
) as the file system
+ * path, you can link only 1 data repository to the file system. You can only specify
+ * "/" as the file system path for the first data repository associated with a file system.
For more information, see
+ * For more information, see
* Automatically import updates from your S3 bucket. This parameter is not supported for file systems with the Specifies the IP address range in which the endpoints to access your file system
* will be created. By default, Amazon FSx selects an unused IP address range for you
* from the 198.19.* range. The Endpoint IP address range you select for your file system
+ * must exist outside the VPC's CIDR range and must be at least /30 or larger. Specifies the method used to compress the data on the volume. Unless the compression
- * type is specified, volumes inherit the Specifies the record size of an OpenZFS root volume, in kibibytes (KiB). Valid values are 4, 8,
+ * 16, 32, 64, 128, 256, 512, or 1024 KiB. The default is 128 KiB. Most workloads should use the
+ * default record size. Database workflows can benefit from a smaller record size, while streaming
+ * workflows can benefit from a larger record size. For additional guidance on setting a custom record
+ * size, see
+ * Tips for maximizing performance in the
+ * Amazon FSx for OpenZFS User Guide. Specifies the method used to compress the data on the volume. The compression
+ * type is
- * Persistent_2
deployment type.
- * Instead, use CreateDataRepositoryAssociation"
to create
+ * Instead, use CreateDataRepositoryAssociation
to create
* a data repository association to link your Lustre file system to a data repository.DataCompressionType
value of their
- * parent volume.NONE
by default.
*
NONE
- Doesn't compress the data on the volume.NONE
- Doesn't compress the data on the volume.
+ * NONE
is the default.
- * ZSTD
- Compresses the data in the volume using the ZStandard
- * (ZSTD) compression algorithm. This algorithm reduces the amount of space used on
- * your volume and has very little impact on compute resources.
ZSTD
- Compresses the data in the volume using the Zstandard
+ * (ZSTD) compression algorithm. Compared to LZ4, Z-Standard provides a better
+ * compression ratio to minimize on-disk storage utilization.
+ *
+ * LZ4
- Compresses the data in the volume using the LZ4
+ * compression algorithm. Compared to Z-Standard, LZ4 is less compute-intensive
+ * and delivers higher write throughput speeds.
A Boolean value indicating whether tags for the volume should be copied to snapshots.
- * This value defaults to false
. If it's set to true
, all tags
- * for the volume are copied to snapshots where the user doesn't specify tags. If this
+ *
A Boolean value indicating whether tags for the volume should be copied to snapshots
+ * of the volume. This value defaults to false
. If it's set to true
,
+ * all tags for the volume are copied to snapshots where the user doesn't specify tags. If this
* value is true
and you specify one or more tags, only the specified tags are
* copied to snapshots. If you specify one or more tags when creating the snapshot, no tags
* are copied from the volume, regardless of this value.
The OpenZFS configuration properties for the file system that you are creating.
+ *The Amazon FSx for OpenZFS configuration properties for the file system that you are creating.
*/ export interface CreateFileSystemOpenZFSConfiguration { /** @@ -3643,8 +3696,8 @@ export interface CreateFileSystemOpenZFSConfiguration { /** *Specifies the file system deployment type. Amazon FSx for OpenZFS supports
- * SINGLE_AZ_1
. SINGLE_AZ_1
is a file system configured for a
- * single Availability Zone (AZ) of redundancy.
SINGLE_AZ_1
. SINGLE_AZ_1
deployment type is configured for redundancy
+ * within a single Availability Zone.
*/
DeploymentType: OpenZFSDeploymentType | string | undefined;
@@ -5017,42 +5070,77 @@ export namespace CreateOpenZFSOriginSnapshotConfiguration {
}
/**
- * Specifies the configuration of the OpenZFS volume that you are creating.
+ *Specifies the configuration of the Amazon FSx for OpenZFS volume that you are creating.
*/ export interface CreateOpenZFSVolumeConfiguration { /** - *The ID of the volume to use as the parent volume.
+ *The ID of the volume to use as the parent volume of the volume that you are creating.
*/ ParentVolumeId: string | undefined; /** - *The amount of storage in gibibytes (GiB) to reserve from the parent volume. You can't - * reserve more storage than the parent volume has reserved.
+ *Specifies the amount of storage in gibibytes (GiB) to reserve from the parent volume. Setting
+ * StorageCapacityReservationGiB
guarantees that the specified amount of storage space
+ * on the parent volume will always be available for the volume.
+ * You can't reserve more storage than the parent volume has. To not specify a storage capacity
+ * reservation, set this to 0
or -1
. For more information, see
+ * Volume properties
+ * in the Amazon FSx for OpenZFS User Guide.
The maximum amount of storage in gibibytes (GiB) that the volume can use from its - * parent. You can specify a quota larger than the storage on the parent volume.
+ *Sets the maximum storage size in gibibytes (GiB) for the volume. You can specify
+ * a quota that is larger than the storage on the parent volume. A volume quota limits
+ * the amount of storage that the volume can consume to the configured amount, but does not
+ * guarantee the space will be available on the parent volume. To guarantee quota space, you must also set
+ * StorageCapacityReservationGiB
. To not specify a storage capacity quota, set this to -1
.
+ *
For more information, see + * Volume properties + * in the Amazon FSx for OpenZFS User Guide.
*/ StorageCapacityQuotaGiB?: number; /** - *Specifies the method used to compress the data on the volume. Unless the compression
- * type is specified, volumes inherit the DataCompressionType
value of their
- * parent volume.
Specifies the suggested block size for a volume in a ZFS dataset, in kibibytes (KiB). Valid values are 4, 8, + * 16, 32, 64, 128, 256, 512, or 1024 KiB. The default is 128 KiB. + * We recommend using the default setting for the majority of use cases. + * Generally, workloads that write in fixed small or large record sizes + * may benefit from setting a custom record size, like database workloads + * (small record size) or media streaming workloads (large record size). + * For additional guidance on when + * to set a custom record size, see + * + * ZFS Record size in the Amazon FSx for OpenZFS User Guide.
+ */ + RecordSizeKiB?: number; + + /** + *Specifies the method used to compress the data on the volume. The compression
+ * type is NONE
by default.
- * NONE
- Doesn't compress the data on the volume.
NONE
- Doesn't compress the data on the volume.
+ * NONE
is the default.
*
* ZSTD
- Compresses the data in the volume using the Zstandard
- * (ZSTD) compression algorithm. This algorithm reduces the amount of space used on
- * your volume and has very little impact on compute resources.
+ * LZ4
- Compresses the data in the volume using the LZ4
+ * compression algorithm. LZ4 compression provides a lower level of compression
+ * and higher write throughput performance than ZSTD compression.
For more information about volume compression types and the performance of your Amazon FSx for OpenZFS file system, + * see + * Tips for maximizing performance File system and volume settings in the Amazon FSx for OpenZFS User Guide.
*/ DataCompressionType?: OpenZFSDataCompressionType | string; @@ -5062,7 +5150,7 @@ export interface CreateOpenZFSVolumeConfiguration { * for the volume are copied to snapshots where the user doesn't specify tags. If this * value istrue
, and you specify one or more tags, only the specified tags
* are copied to snapshots. If you specify one or more tags when creating the snapshot, no
- * tags are copied from the volume, regardless of this value.
+ * tags are copied from the volume, regardless of this value.
*/
CopyTagsToSnapshots?: boolean;
@@ -5073,17 +5161,17 @@ export interface CreateOpenZFSVolumeConfiguration {
OriginSnapshot?: CreateOpenZFSOriginSnapshotConfiguration;
/**
- * A Boolean value indicating whether the volume is read-only.
+ *A Boolean value indicating whether the volume is read-only.
*/ ReadOnly?: boolean; /** - *The configuration object for mounting a Network File System (NFS) file system.
+ *The configuration object for mounting a Network File System (NFS) file system.
*/ NfsExports?: OpenZFSNfsExport[]; /** - *An object specifying how much storage users or groups can use on the volume.
+ *An object specifying how much storage users or groups can use on the volume.
*/ UserAndGroupQuotas?: OpenZFSUserOrGroupQuota[]; } @@ -5166,7 +5254,7 @@ export class MissingVolumeConfiguration extends __BaseException { } /** - *No Amazon FSx for NetApp ONTAP SVMs were found based upon the supplied parameters.
+ *No FSx for ONTAP SVMs were found based upon the supplied parameters.
*/ export class StorageVirtualMachineNotFound extends __BaseException { readonly name: "StorageVirtualMachineNotFound" = "StorageVirtualMachineNotFound"; @@ -5452,8 +5540,12 @@ export namespace DeleteFileSystemLustreConfiguration { }); } +export enum DeleteFileSystemOpenZFSOption { + DELETE_CHILD_VOLUMES_AND_SNAPSHOTS = "DELETE_CHILD_VOLUMES_AND_SNAPSHOTS", +} + /** - *The configuration object for the OpenZFS file system used in the + *
The configuration object for the Amazon FSx for OpenZFS file system used in the
* DeleteFileSystem
operation.
By default, Amazon FSx for OpenZFS takes a final backup on your behalf when
* the DeleteFileSystem
operation is invoked. Doing this helps protect you
* from data loss, and we highly recommend taking the final backup. If you want to skip
- * this backup, use this
- * value
- * to do so.
true
.
*/
SkipFinalBackup?: boolean;
/**
- * A list of Tag
values, with a maximum of 50 elements.
A list of tags to apply to the file system's final backup.
*/ FinalBackupTags?: Tag[]; + + /** + *To delete a file system if there are child volumes present below the root volume,
+ * use the string DELETE_CHILD_VOLUMES_AND_SNAPSHOTS
. If your file system
+ * has child volumes and you don't use this option, the delete request will fail.
true
and you specify
* one or more tags, only the specified tags are copied to backups. If you specify one or
* more tags when creating a user-initiated backup, no tags are copied from the file
- * system, regardless of this value.
+ * system, regardless of this value.
*/
CopyTagsToBackups?: boolean;
@@ -7132,7 +7229,7 @@ export interface UpdateFileSystemOpenZFSConfiguration {
* for the volume are copied to snapshots where the user doesn't specify tags. If this
* value is true
and you specify one or more tags, only the specified tags are
* copied to snapshots. If you specify one or more tags when creating the snapshot, no tags
- * are copied from the volume, regardless of this value.
+ * are copied from the volume, regardless of this value.
*/
CopyTagsToVolumes?: boolean;
@@ -7145,7 +7242,7 @@ export interface UpdateFileSystemOpenZFSConfiguration {
/**
* The throughput of an Amazon FSx file system, measured in megabytes per second - * (MBps), in 2 to the nth increments, between 2^3 (8) and 2^12 (4096).
+ * (MBps). Valid values are 64, 128, 256, 512, 1024, 2048, 3072, or 4096 MB/s. */ ThroughputCapacity?: number; @@ -7376,7 +7473,7 @@ export interface UpdateSnapshotRequest { ClientRequestToken?: string; /** - *The name of the snapshot to update.
+ *The name of the snapshot to update.
*/ Name: string | undefined; @@ -7520,38 +7617,55 @@ export namespace UpdateOntapVolumeConfiguration { } /** - *Used to specify changes to the OpenZFS configuration for the volume that you are - * updating.
+ *Used to specify changes to the OpenZFS configuration for the volume + * that you are updating.
*/ export interface UpdateOpenZFSVolumeConfiguration { /** - *The amount of storage in gibibytes (GiB) to reserve from the parent volume. You can't - * reserve more storage than the parent volume has reserved.
+ *The amount of storage in gibibytes (GiB) to reserve from the parent volume.
+ * You can't reserve more storage than the parent volume has reserved. You can specify
+ * a value of -1
to unset a volume's storage capacity reservation.
The maximum amount of storage in gibibytes (GiB) that the volume can use from its - * parent. You can specify a quota larger than the storage on the parent volume.
+ *The maximum amount of storage in gibibytes (GiB) that the volume can use from its
+ * parent. You can specify a quota larger than the storage on the parent volume. You
+ * can specify a value of -1
to unset a volume's storage capacity quota.
Specifies the method used to compress the data on the volume. Unless the compression
- * type is specified, volumes inherit the DataCompressionType
value of their
- * parent volume.
Specifies the record size of an OpenZFS volume, in kibibytes (KiB). Valid values are 4, 8, + * 16, 32, 64, 128, 256, 512, or 1024 KiB. The default is 128 KiB. + * Most workloads should use the default record size. Database workflows can benefit from a smaller + * record size, while streaming workflows can benefit from a larger record size. For additional guidance on when + * to set a custom record size, see + * Tips for maximizing performance in the + * Amazon FSx for OpenZFS User Guide.
+ */ + RecordSizeKiB?: number; + + /** + *Specifies the method used to compress the data on the volume. The compression
+ * type is NONE
by default.
- * NONE
- Doesn't compress the data on the volume.
NONE
- Doesn't compress the data on the volume.
+ * NONE
is the default.
*
* ZSTD
- Compresses the data in the volume using the Zstandard
- * (ZSTD) compression algorithm. This algorithm reduces the amount of space used on
- * your volume and has very little impact on compute resources.
+ * LZ4
- Compresses the data in the volume using the LZ4
+ * compression algorithm. Compared to Z-Standard, LZ4 is less compute-intensive
+ * and delivers higher write throughput speeds.
Describes why a resource lifecycle state changed.
+ */ + LifecycleTransitionReason?: LifecycleTransitionReason; + /** *A list of Tag
values, with a maximum of 50 elements.
The GameLift service limits and current utilization for an Amazon Web Services Region or location. + *
Retrieves the instance limits and current utilization for an Amazon Web Services Region or location. * Instance limits control the number of instances, per instance type, per location, that * your Amazon Web Services account can use. Learn more at Amazon EC2 Instance Types. The information * returned includes the maximum number of instances allowed and your account's current @@ -3659,7 +3659,7 @@ export class GameLift extends GameLiftClient { *
This operation is not designed to be continually called to track matchmaking ticket * status. This practice can cause you to exceed your API limit, which results in errors. * Instead, as a best practice, set up an Amazon Simple Notification Service to receive notifications, and provide - * the topic ARN in the matchmaking configuration. Continuously poling ticket status with + * the topic ARN in the matchmaking configuration. Continuously polling ticket status with * DescribeMatchmaking should only be used for games in development * with low matchmaking usage.
* diff --git a/clients/client-gamelift/src/commands/DescribeEC2InstanceLimitsCommand.ts b/clients/client-gamelift/src/commands/DescribeEC2InstanceLimitsCommand.ts index 0d09c2ff15cd..0e204ccf6c99 100644 --- a/clients/client-gamelift/src/commands/DescribeEC2InstanceLimitsCommand.ts +++ b/clients/client-gamelift/src/commands/DescribeEC2InstanceLimitsCommand.ts @@ -22,7 +22,7 @@ export interface DescribeEC2InstanceLimitsCommandInput extends DescribeEC2Instan export interface DescribeEC2InstanceLimitsCommandOutput extends DescribeEC2InstanceLimitsOutput, __MetadataBearer {} /** - *The GameLift service limits and current utilization for an Amazon Web Services Region or location. + *
Retrieves the instance limits and current utilization for an Amazon Web Services Region or location. * Instance limits control the number of instances, per instance type, per location, that * your Amazon Web Services account can use. Learn more at Amazon EC2 Instance Types. The information * returned includes the maximum number of instances allowed and your account's current diff --git a/clients/client-gamelift/src/commands/DescribeMatchmakingCommand.ts b/clients/client-gamelift/src/commands/DescribeMatchmakingCommand.ts index cc6fa4b138be..98c7d3dc641d 100644 --- a/clients/client-gamelift/src/commands/DescribeMatchmakingCommand.ts +++ b/clients/client-gamelift/src/commands/DescribeMatchmakingCommand.ts @@ -31,7 +31,7 @@ export interface DescribeMatchmakingCommandOutput extends DescribeMatchmakingOut *
This operation is not designed to be continually called to track matchmaking ticket * status. This practice can cause you to exceed your API limit, which results in errors. * Instead, as a best practice, set up an Amazon Simple Notification Service to receive notifications, and provide - * the topic ARN in the matchmaking configuration. Continuously poling ticket status with + * the topic ARN in the matchmaking configuration. Continuously polling ticket status with * DescribeMatchmaking should only be used for games in development * with low matchmaking usage.
* diff --git a/clients/client-greengrassv2/src/GreengrassV2.ts b/clients/client-greengrassv2/src/GreengrassV2.ts index 4f0cf794390c..7782a8dd418a 100644 --- a/clients/client-greengrassv2/src/GreengrassV2.ts +++ b/clients/client-greengrassv2/src/GreengrassV2.ts @@ -400,8 +400,8 @@ export class GreengrassV2 extends GreengrassV2Client { * target that has an existing deployment, you replace the previous deployment. IoT Greengrass applies the * new deployment to the target devices. *Every deployment has a revision number that indicates how many deployment revisions you - * define for a target. Use this operation to create a new revision of an existing deployment. - * This operation returns the revision number of the new deployment when you create it.
+ * define for a target. Use this operation to create a new revision of an existing + * deployment. *For more information, see the Create deployments in the * IoT Greengrass V2 Developer Guide.
*/ @@ -641,7 +641,7 @@ export class GreengrassV2 extends GreengrassV2Client { *Retrieves connectivity information for a Greengrass core device.
*Connectivity information includes endpoints and ports where client devices * can connect to an MQTT broker on the core device. When a client device - * calls the Greengrass discovery API, + * calls the IoT Greengrass discovery API, * IoT Greengrass returns connectivity information for all of the core devices where the client device can * connect. For more information, see Connect client devices to * core devices in the IoT Greengrass Version 2 Developer Guide.
@@ -1144,7 +1144,7 @@ export class GreengrassV2 extends GreengrassV2Client { *Updates connectivity information for a Greengrass core device.
*Connectivity information includes endpoints and ports where client devices * can connect to an MQTT broker on the core device. When a client device - * calls the Greengrass discovery API, + * calls the IoT Greengrass discovery API, * IoT Greengrass returns connectivity information for all of the core devices where the client device can * connect. For more information, see Connect client devices to * core devices in the IoT Greengrass Version 2 Developer Guide.
diff --git a/clients/client-greengrassv2/src/commands/CreateDeploymentCommand.ts b/clients/client-greengrassv2/src/commands/CreateDeploymentCommand.ts index d12a77b2f034..3ce6a00e0503 100644 --- a/clients/client-greengrassv2/src/commands/CreateDeploymentCommand.ts +++ b/clients/client-greengrassv2/src/commands/CreateDeploymentCommand.ts @@ -29,8 +29,8 @@ export interface CreateDeploymentCommandOutput extends CreateDeploymentResponse, * target that has an existing deployment, you replace the previous deployment. IoT Greengrass applies the * new deployment to the target devices. *Every deployment has a revision number that indicates how many deployment revisions you - * define for a target. Use this operation to create a new revision of an existing deployment. - * This operation returns the revision number of the new deployment when you create it.
+ * define for a target. Use this operation to create a new revision of an existing + * deployment. *For more information, see the Create deployments in the * IoT Greengrass V2 Developer Guide.
* @example diff --git a/clients/client-greengrassv2/src/commands/GetConnectivityInfoCommand.ts b/clients/client-greengrassv2/src/commands/GetConnectivityInfoCommand.ts index 5ea4e90b976c..b92dcb887b41 100644 --- a/clients/client-greengrassv2/src/commands/GetConnectivityInfoCommand.ts +++ b/clients/client-greengrassv2/src/commands/GetConnectivityInfoCommand.ts @@ -25,7 +25,7 @@ export interface GetConnectivityInfoCommandOutput extends GetConnectivityInfoRes *Retrieves connectivity information for a Greengrass core device.
*Connectivity information includes endpoints and ports where client devices * can connect to an MQTT broker on the core device. When a client device - * calls the Greengrass discovery API, + * calls the IoT Greengrass discovery API, * IoT Greengrass returns connectivity information for all of the core devices where the client device can * connect. For more information, see Connect client devices to * core devices in the IoT Greengrass Version 2 Developer Guide.
diff --git a/clients/client-greengrassv2/src/commands/UpdateConnectivityInfoCommand.ts b/clients/client-greengrassv2/src/commands/UpdateConnectivityInfoCommand.ts index 3bd6c669d47f..5820ab873754 100644 --- a/clients/client-greengrassv2/src/commands/UpdateConnectivityInfoCommand.ts +++ b/clients/client-greengrassv2/src/commands/UpdateConnectivityInfoCommand.ts @@ -25,7 +25,7 @@ export interface UpdateConnectivityInfoCommandOutput extends UpdateConnectivityI *Updates connectivity information for a Greengrass core device.
*Connectivity information includes endpoints and ports where client devices * can connect to an MQTT broker on the core device. When a client device - * calls the Greengrass discovery API, + * calls the IoT Greengrass discovery API, * IoT Greengrass returns connectivity information for all of the core devices where the client device can * connect. For more information, see Connect client devices to * core devices in the IoT Greengrass Version 2 Developer Guide.
diff --git a/clients/client-kafkaconnect/src/KafkaConnect.ts b/clients/client-kafkaconnect/src/KafkaConnect.ts index 54f5e8cbf625..b9588b0e747b 100644 --- a/clients/client-kafkaconnect/src/KafkaConnect.ts +++ b/clients/client-kafkaconnect/src/KafkaConnect.ts @@ -20,6 +20,11 @@ import { DeleteConnectorCommandInput, DeleteConnectorCommandOutput, } from "./commands/DeleteConnectorCommand"; +import { + DeleteCustomPluginCommand, + DeleteCustomPluginCommandInput, + DeleteCustomPluginCommandOutput, +} from "./commands/DeleteCustomPluginCommand"; import { DescribeConnectorCommand, DescribeConnectorCommandInput, @@ -189,6 +194,38 @@ export class KafkaConnect extends KafkaConnectClient { } } + /** + *Deletes a custom plugin.
+ */ + public deleteCustomPlugin( + args: DeleteCustomPluginCommandInput, + options?: __HttpHandlerOptions + ): PromiseReturns summary information about the connector.
*/ @@ -286,7 +323,9 @@ export class KafkaConnect extends KafkaConnectClient { } /** - *Returns a list of all the connectors in this account and Region. The list is limited to connectors whose name starts with the specified prefix. The response also includes a description of each of the listed connectors.
+ *Returns a list of all the connectors in this account and Region. The list is limited to + * connectors whose name starts with the specified prefix. The response also includes a + * description of each of the listed connectors.
*/ public listConnectors( args: ListConnectorsCommandInput, diff --git a/clients/client-kafkaconnect/src/KafkaConnectClient.ts b/clients/client-kafkaconnect/src/KafkaConnectClient.ts index 7e20139e1440..500ef570ad92 100644 --- a/clients/client-kafkaconnect/src/KafkaConnectClient.ts +++ b/clients/client-kafkaconnect/src/KafkaConnectClient.ts @@ -58,6 +58,7 @@ import { CreateWorkerConfigurationCommandOutput, } from "./commands/CreateWorkerConfigurationCommand"; import { DeleteConnectorCommandInput, DeleteConnectorCommandOutput } from "./commands/DeleteConnectorCommand"; +import { DeleteCustomPluginCommandInput, DeleteCustomPluginCommandOutput } from "./commands/DeleteCustomPluginCommand"; import { DescribeConnectorCommandInput, DescribeConnectorCommandOutput } from "./commands/DescribeConnectorCommand"; import { DescribeCustomPluginCommandInput, @@ -81,6 +82,7 @@ export type ServiceInputTypes = | CreateCustomPluginCommandInput | CreateWorkerConfigurationCommandInput | DeleteConnectorCommandInput + | DeleteCustomPluginCommandInput | DescribeConnectorCommandInput | DescribeCustomPluginCommandInput | DescribeWorkerConfigurationCommandInput @@ -94,6 +96,7 @@ export type ServiceOutputTypes = | CreateCustomPluginCommandOutput | CreateWorkerConfigurationCommandOutput | DeleteConnectorCommandOutput + | DeleteCustomPluginCommandOutput | DescribeConnectorCommandOutput | DescribeCustomPluginCommandOutput | DescribeWorkerConfigurationCommandOutput diff --git a/clients/client-kafkaconnect/src/commands/DeleteCustomPluginCommand.ts b/clients/client-kafkaconnect/src/commands/DeleteCustomPluginCommand.ts new file mode 100644 index 000000000000..93637f7acbc1 --- /dev/null +++ b/clients/client-kafkaconnect/src/commands/DeleteCustomPluginCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from 
"@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { KafkaConnectClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../KafkaConnectClient"; +import { DeleteCustomPluginRequest, DeleteCustomPluginResponse } from "../models/models_0"; +import { + deserializeAws_restJson1DeleteCustomPluginCommand, + serializeAws_restJson1DeleteCustomPluginCommand, +} from "../protocols/Aws_restJson1"; + +export interface DeleteCustomPluginCommandInput extends DeleteCustomPluginRequest {} +export interface DeleteCustomPluginCommandOutput extends DeleteCustomPluginResponse, __MetadataBearer {} + +/** + *Deletes a custom plugin.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { KafkaConnectClient, DeleteCustomPluginCommand } from "@aws-sdk/client-kafkaconnect"; // ES Modules import + * // const { KafkaConnectClient, DeleteCustomPluginCommand } = require("@aws-sdk/client-kafkaconnect"); // CommonJS import + * const client = new KafkaConnectClient(config); + * const command = new DeleteCustomPluginCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DeleteCustomPluginCommandInput} for command's `input` shape. + * @see {@link DeleteCustomPluginCommandOutput} for command's `response` shape. + * @see {@link KafkaConnectClientResolvedConfig | config} for KafkaConnectClient's `config` shape. + * + */ +export class DeleteCustomPluginCommand extends $Command< + DeleteCustomPluginCommandInput, + DeleteCustomPluginCommandOutput, + KafkaConnectClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DeleteCustomPluginCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackReturns a list of all the connectors in this account and Region. The list is limited to connectors whose name starts with the specified prefix. The response also includes a description of each of the listed connectors.
+ *Returns a list of all the connectors in this account and Region. The list is limited to + * connectors whose name starts with the specified prefix. The response also includes a + * description of each of the listed connectors.
* @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-kafkaconnect/src/commands/index.ts b/clients/client-kafkaconnect/src/commands/index.ts index 2b848ed670fb..8fc3e2d1748a 100644 --- a/clients/client-kafkaconnect/src/commands/index.ts +++ b/clients/client-kafkaconnect/src/commands/index.ts @@ -2,6 +2,7 @@ export * from "./CreateConnectorCommand"; export * from "./CreateCustomPluginCommand"; export * from "./CreateWorkerConfigurationCommand"; export * from "./DeleteConnectorCommand"; +export * from "./DeleteCustomPluginCommand"; export * from "./DescribeConnectorCommand"; export * from "./DescribeCustomPluginCommand"; export * from "./DescribeWorkerConfigurationCommand"; diff --git a/clients/client-kafkaconnect/src/models/models_0.ts b/clients/client-kafkaconnect/src/models/models_0.ts index 2098c0c7f27e..875f2b7e2b89 100644 --- a/clients/client-kafkaconnect/src/models/models_0.ts +++ b/clients/client-kafkaconnect/src/models/models_0.ts @@ -1,4 +1,4 @@ -import { ExceptionOptionType as __ExceptionOptionType } from "@aws-sdk/smithy-client"; +import { ExceptionOptionType as __ExceptionOptionType, SENSITIVE_STRING } from "@aws-sdk/smithy-client"; import { MetadataBearer as $MetadataBearer } from "@aws-sdk/types"; import { KafkaConnectServiceException as __BaseException } from "./KafkaConnectServiceException"; @@ -8,7 +8,8 @@ import { KafkaConnectServiceException as __BaseException } from "./KafkaConnectS */ export interface ScaleInPolicyDescription { /** - *Specifies the CPU utilization percentage threshold at which you want connector scale in to be triggered.
+ *Specifies the CPU utilization percentage threshold at which you want connector scale in + * to be triggered.
*/ cpuUtilizationPercentage?: number; } @@ -27,7 +28,8 @@ export namespace ScaleInPolicyDescription { */ export interface ScaleOutPolicyDescription { /** - *The CPU utilization percentage threshold at which you want connector scale out to be triggered.
+ *The CPU utilization percentage threshold at which you want connector scale out to be + * triggered.
*/ cpuUtilizationPercentage?: number; } @@ -51,7 +53,8 @@ export interface AutoScalingDescription { maxWorkerCount?: number; /** - *The number of microcontroller units (MCUs) allocated to each connector worker. The valid values are 1,2,4,8.
+ *The number of microcontroller units (MCUs) allocated to each connector worker. The valid + * values are 1,2,4,8.
*/ mcuCount?: number; @@ -85,7 +88,8 @@ export namespace AutoScalingDescription { */ export interface ProvisionedCapacityDescription { /** - *The number of microcontroller units (MCUs) allocated to each connector worker. The valid values are 1,2,4,8.
+ *The number of microcontroller units (MCUs) allocated to each connector worker. The valid + * values are 1,2,4,8.
*/ mcuCount?: number; @@ -170,7 +174,8 @@ export interface ApacheKafkaClusterDescription { bootstrapServers?: string; /** - *Details of an Amazon VPC which has network connectivity to the Apache Kafka cluster.
+ *Details of an Amazon VPC which has network connectivity to the Apache Kafka + * cluster.
*/ vpc?: VpcDescription; } @@ -209,11 +214,13 @@ export enum KafkaClusterClientAuthenticationType { } /** - *The client authentication information used in order to authenticate with the Apache Kafka cluster.
+ *The client authentication information used in order to authenticate with the Apache + * Kafka cluster.
*/ export interface KafkaClusterClientAuthenticationDescription { /** - *The type of client authentication used to connect to the Apache Kafka cluster. Value NONE means that no client authentication is used.
+ *The type of client authentication used to connect to the Apache Kafka cluster. Value + * NONE means that no client authentication is used.
*/ authenticationType?: KafkaClusterClientAuthenticationType | string; } @@ -276,11 +283,13 @@ export namespace CloudWatchLogsLogDeliveryDescription { } /** - *A description of the settings for delivering logs to Amazon Kinesis Data Firehose.
+ *A description of the settings for delivering logs to Amazon Kinesis Data + * Firehose.
*/ export interface FirehoseLogDeliveryDescription { /** - *The name of the Kinesis Data Firehose delivery stream that is the destination for log delivery.
+ *The name of the Kinesis Data Firehose delivery stream that is the destination for log + * delivery.
*/ deliveryStream?: string; @@ -329,7 +338,8 @@ export namespace S3LogDeliveryDescription { } /** - *Workers can send worker logs to different destination types. This configuration specifies the details of these destinations.
+ *Workers can send worker logs to different destination types. This configuration + * specifies the details of these destinations.
*/ export interface WorkerLogDeliveryDescription { /** @@ -362,7 +372,8 @@ export namespace WorkerLogDeliveryDescription { */ export interface LogDeliveryDescription { /** - *The workers can send worker logs to different destination types. This configuration specifies the details of these destinations.
+ *The workers can send worker logs to different destination types. This configuration + * specifies the details of these destinations.
*/ workerLogDelivery?: WorkerLogDeliveryDescription; } @@ -488,7 +499,8 @@ export interface ConnectorSummary { kafkaCluster?: KafkaClusterDescription; /** - *The type of client authentication used to connect to the Apache Kafka cluster. The value is NONE when no client authentication is used.
+ *The type of client authentication used to connect to the Apache Kafka cluster. The value + * is NONE when no client authentication is used.
*/ kafkaClusterClientAuthentication?: KafkaClusterClientAuthenticationDescription; @@ -498,7 +510,8 @@ export interface ConnectorSummary { kafkaClusterEncryptionInTransit?: KafkaClusterEncryptionInTransitDescription; /** - *The version of Kafka Connect. It has to be compatible with both the Apache Kafka cluster's version and the plugins.
+ *The version of Kafka Connect. It has to be compatible with both the Apache Kafka + * cluster's version and the plugins.
*/ kafkaConnectVersion?: string; @@ -513,7 +526,8 @@ export interface ConnectorSummary { plugins?: PluginDescription[]; /** - *The Amazon Resource Name (ARN) of the IAM role used by the connector to access Amazon Web Services resources.
+ *The Amazon Resource Name (ARN) of the IAM role used by the connector to access Amazon + * Web Services resources.
*/ serviceExecutionRoleArn?: string; @@ -551,7 +565,8 @@ export enum CustomPluginContentType { */ export interface CustomPluginFileDescription { /** - *The hex-encoded MD5 checksum of the custom plugin file. You can use it to validate the file.
+ *The hex-encoded MD5 checksum of the custom plugin file. You can use it to validate the + * file.
*/ fileMd5?: string; @@ -604,7 +619,8 @@ export namespace S3LocationDescription { */ export interface CustomPluginLocationDescription { /** - *The S3 bucket Amazon Resource Name (ARN), file key, and object version of the plugin file stored in Amazon S3.
+ *The S3 bucket Amazon Resource Name (ARN), file key, and object version of the plugin + * file stored in Amazon S3.
*/ s3Location?: S3LocationDescription; } @@ -707,7 +723,8 @@ export namespace CustomPluginSummary { } /** - *A plugin is an AWS resource that contains the code that defines a connector's logic.
+ *A plugin is an AWS resource that contains the code that defines a connector's + * logic.
*/ export interface CustomPlugin { /** @@ -731,7 +748,8 @@ export namespace CustomPlugin { } /** - *A plugin is an AWS resource that contains the code that defines your connector logic.
+ *A plugin is an AWS resource that contains the code that defines your connector logic. + *
*/ export interface Plugin { /** @@ -851,7 +869,8 @@ export interface ApacheKafkaCluster { bootstrapServers: string | undefined; /** - *Details of an Amazon VPC which has network connectivity to the Apache Kafka cluster.
+ *Details of an Amazon VPC which has network connectivity to the Apache Kafka + * cluster.
*/ vpc: Vpc | undefined; } @@ -870,7 +889,8 @@ export namespace ApacheKafkaCluster { */ export interface ScaleInPolicy { /** - *Specifies the CPU utilization percentage threshold at which you want connector scale in to be triggered.
+ *Specifies the CPU utilization percentage threshold at which you want connector scale in + * to be triggered.
*/ cpuUtilizationPercentage: number | undefined; } @@ -889,7 +909,8 @@ export namespace ScaleInPolicy { */ export interface ScaleOutPolicy { /** - *The CPU utilization percentage threshold at which you want connector scale out to be triggered.
+ *The CPU utilization percentage threshold at which you want connector scale out to be + * triggered.
*/ cpuUtilizationPercentage: number | undefined; } @@ -913,7 +934,8 @@ export interface AutoScaling { maxWorkerCount: number | undefined; /** - *The number of microcontroller units (MCUs) allocated to each connector worker. The valid values are 1,2,4,8.
+ *The number of microcontroller units (MCUs) allocated to each connector worker. The valid + * values are 1,2,4,8.
*/ mcuCount: number | undefined; @@ -947,7 +969,8 @@ export namespace AutoScaling { */ export interface ScaleInPolicyUpdate { /** - *The target CPU utilization percentage threshold at which you want connector scale in to be triggered.
+ *The target CPU utilization percentage threshold at which you want connector scale in to + * be triggered.
*/ cpuUtilizationPercentage: number | undefined; } @@ -966,7 +989,8 @@ export namespace ScaleInPolicyUpdate { */ export interface ScaleOutPolicyUpdate { /** - *The target CPU utilization percentage threshold at which you want connector scale out to be triggered.
+ *The target CPU utilization percentage threshold at which you want connector scale out to + * be triggered.
*/ cpuUtilizationPercentage: number | undefined; } @@ -990,7 +1014,8 @@ export interface AutoScalingUpdate { maxWorkerCount: number | undefined; /** - *The target number of microcontroller units (MCUs) allocated to each connector worker. The valid values are 1,2,4,8.
+ *The target number of microcontroller units (MCUs) allocated to each connector worker. + * The valid values are 1,2,4,8.
*/ mcuCount: number | undefined; @@ -1020,7 +1045,8 @@ export namespace AutoScalingUpdate { } /** - *HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.
+ *HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then + * retry it.
*/ export class BadRequestException extends __BaseException { readonly name: "BadRequestException" = "BadRequestException"; @@ -1043,7 +1069,8 @@ export class BadRequestException extends __BaseException { */ export interface ProvisionedCapacity { /** - *The number of microcontroller units (MCUs) allocated to each connector worker. The valid values are 1,2,4,8.
+ *The number of microcontroller units (MCUs) allocated to each connector worker. The valid + * values are 1,2,4,8.
*/ mcuCount: number | undefined; @@ -1063,7 +1090,8 @@ export namespace ProvisionedCapacity { } /** - *Information about the capacity of the connector, whether it is auto scaled or provisioned.
+ *Information about the capacity of the connector, whether it is auto scaled or + * provisioned.
*/ export interface Capacity { /** @@ -1091,7 +1119,8 @@ export namespace Capacity { */ export interface ProvisionedCapacityUpdate { /** - *The number of microcontroller units (MCUs) allocated to each connector worker. The valid values are 1,2,4,8.
+ *The number of microcontroller units (MCUs) allocated to each connector worker. The valid + * values are 1,2,4,8.
*/ mcuCount: number | undefined; @@ -1111,7 +1140,8 @@ export namespace ProvisionedCapacityUpdate { } /** - *The target capacity for the connector. The capacity can be auto scaled or provisioned.
+ *The target capacity for the connector. The capacity can be auto scaled or + * provisioned.
*/ export interface CapacityUpdate { /** @@ -1159,7 +1189,8 @@ export namespace CloudWatchLogsLogDelivery { } /** - *HTTP Status Code 409: Conflict. A resource with this name already exists. Retry your request with another name.
+ *HTTP Status Code 409: Conflict. A resource with this name already exists. Retry your + * request with another name.
*/ export class ConflictException extends __BaseException { readonly name: "ConflictException" = "ConflictException"; @@ -1197,11 +1228,13 @@ export namespace KafkaCluster { } /** - *The client authentication information used in order to authenticate with the Apache Kafka cluster.
+ *The client authentication information used in order to authenticate with the Apache + * Kafka cluster.
*/ export interface KafkaClusterClientAuthentication { /** - *The type of client authentication used to connect to the Apache Kafka cluster. Value NONE means that no client authentication is used.
+ *The type of client authentication used to connect to the Apache Kafka cluster. Value + * NONE means that no client authentication is used.
*/ authenticationType: KafkaClusterClientAuthenticationType | string | undefined; } @@ -1239,7 +1272,8 @@ export namespace KafkaClusterEncryptionInTransit { */ export interface FirehoseLogDelivery { /** - *The name of the Kinesis Data Firehose delivery stream that is the destination for log delivery.
+ *The name of the Kinesis Data Firehose delivery stream that is the destination for log + * delivery.
*/ deliveryStream?: string; @@ -1288,7 +1322,8 @@ export namespace S3LogDelivery { } /** - *Workers can send worker logs to different destination types. This configuration specifies the details of these destinations.
+ *Workers can send worker logs to different destination types. This configuration + * specifies the details of these destinations.
*/ export interface WorkerLogDelivery { /** @@ -1321,7 +1356,8 @@ export namespace WorkerLogDelivery { */ export interface LogDelivery { /** - *The workers can send worker logs to different destination types. This configuration specifies the details of these destinations.
+ *The workers can send worker logs to different destination types. This configuration + * specifies the details of these destinations.
*/ workerLogDelivery: WorkerLogDelivery | undefined; } @@ -1336,7 +1372,8 @@ export namespace LogDelivery { } /** - *The configuration of the workers, which are the processes that run the connector logic.
+ *The configuration of the workers, which are the processes that run the connector + * logic.
*/ export interface WorkerConfiguration { /** @@ -1361,7 +1398,8 @@ export namespace WorkerConfiguration { export interface CreateConnectorRequest { /** - *Information about the capacity allocated to the connector. Exactly one of the two properties must be specified.
+ *Information about the capacity allocated to the connector. Exactly one of the two + * properties must be specified.
*/ capacity: Capacity | undefined; @@ -1396,7 +1434,8 @@ export interface CreateConnectorRequest { kafkaClusterEncryptionInTransit: KafkaClusterEncryptionInTransit | undefined; /** - *The version of Kafka Connect. It has to be compatible with both the Apache Kafka cluster's version and the plugins.
+ *The version of Kafka Connect. It has to be compatible with both the Apache Kafka + * cluster's version and the plugins.
*/ kafkaConnectVersion: string | undefined; @@ -1411,7 +1450,10 @@ export interface CreateConnectorRequest { plugins: Plugin[] | undefined; /** - *The Amazon Resource Name (ARN) of the IAM role used by the connector to access the Amazon Web Services resources that it needs. The types of resources depends on the logic of the connector. For example, a connector that has Amazon S3 as a destination must have permissions that allow it to write to the S3 destination bucket.
+ *The Amazon Resource Name (ARN) of the IAM role used by the connector to access the + * Amazon Web Services resources that it needs. The types of resources depends on the logic of + * the connector. For example, a connector that has Amazon S3 as a destination must have + * permissions that allow it to write to the S3 destination bucket.
*/ serviceExecutionRoleArn: string | undefined; @@ -1427,6 +1469,7 @@ export namespace CreateConnectorRequest { */ export const filterSensitiveLog = (obj: CreateConnectorRequest): any => ({ ...obj, + ...(obj.connectorConfiguration && { connectorConfiguration: SENSITIVE_STRING }), }); } @@ -1457,7 +1500,8 @@ export namespace CreateConnectorResponse { } /** - *HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.
+ *HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your + * request.
*/ export class ForbiddenException extends __BaseException { readonly name: "ForbiddenException" = "ForbiddenException"; @@ -1476,7 +1520,8 @@ export class ForbiddenException extends __BaseException { } /** - *HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.
+ *HTTP Status Code 500: Unexpected internal server error. Retrying your request might + * resolve the issue.
*/ export class InternalServerErrorException extends __BaseException { readonly name: "InternalServerErrorException" = "InternalServerErrorException"; @@ -1495,7 +1540,8 @@ export class InternalServerErrorException extends __BaseException { } /** - *HTTP Status Code 404: Resource not found due to incorrect input. Correct your request and then retry it.
+ *HTTP Status Code 404: Resource not found due to incorrect input. Correct your request + * and then retry it.
*/ export class NotFoundException extends __BaseException { readonly name: "NotFoundException" = "NotFoundException"; @@ -1514,7 +1560,8 @@ export class NotFoundException extends __BaseException { } /** - *HTTP Status Code 503: Service Unavailable. Retrying your request in some time might resolve the issue.
+ *HTTP Status Code 503: Service Unavailable. Retrying your request in some time might + * resolve the issue.
*/ export class ServiceUnavailableException extends __BaseException { readonly name: "ServiceUnavailableException" = "ServiceUnavailableException"; @@ -1552,7 +1599,8 @@ export class TooManyRequestsException extends __BaseException { } /** - *HTTP Status Code 401: Unauthorized request. The provided credentials couldn't be validated.
+ *HTTP Status Code 401: Unauthorized request. The provided credentials couldn't be + * validated.
*/ export class UnauthorizedException extends __BaseException { readonly name: "UnauthorizedException" = "UnauthorizedException"; @@ -1604,7 +1652,8 @@ export namespace S3Location { */ export interface CustomPluginLocation { /** - *The S3 bucket Amazon Resource Name (ARN), file key, and object version of the plugin file stored in Amazon S3.
+ *The S3 bucket Amazon Resource Name (ARN), file key, and object version of the plugin + * file stored in Amazon S3.
*/ s3Location: S3Location | undefined; } @@ -1703,6 +1752,7 @@ export namespace CreateWorkerConfigurationRequest { */ export const filterSensitiveLog = (obj: CreateWorkerConfigurationRequest): any => ({ ...obj, + ...(obj.propertiesFileContent && { propertiesFileContent: SENSITIVE_STRING }), }); } @@ -1779,6 +1829,43 @@ export namespace DeleteConnectorResponse { }); } +export interface DeleteCustomPluginRequest { + /** + *The Amazon Resource Name (ARN) of the custom plugin that you want to delete.
+ */ + customPluginArn: string | undefined; +} + +export namespace DeleteCustomPluginRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteCustomPluginRequest): any => ({ + ...obj, + }); +} + +export interface DeleteCustomPluginResponse { + /** + *The Amazon Resource Name (ARN) of the custom plugin that you requested to delete.
+ */ + customPluginArn?: string; + + /** + *The state of the custom plugin.
+ */ + customPluginState?: CustomPluginState | string; +} + +export namespace DeleteCustomPluginResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteCustomPluginResponse): any => ({ + ...obj, + }); +} + export interface DescribeConnectorRequest { /** *The Amazon Resource Name (ARN) of the connector that you want to describe.
@@ -1795,9 +1882,34 @@ export namespace DescribeConnectorRequest { }); } +/** + *Details about the state of a resource.
+ */ +export interface StateDescription { + /** + *A code that describes the state of a resource.
+ */ + code?: string; + + /** + *A message that describes the state of a resource.
+ */ + message?: string; +} + +export namespace StateDescription { + /** + * @internal + */ + export const filterSensitiveLog = (obj: StateDescription): any => ({ + ...obj, + }); +} + export interface DescribeConnectorResponse { /** - *Information about the capacity of the connector, whether it is auto scaled or provisioned.
+ *Information about the capacity of the connector, whether it is auto scaled or + * provisioned.
*/ capacity?: CapacityDescription; @@ -1842,7 +1954,8 @@ export interface DescribeConnectorResponse { kafkaCluster?: KafkaClusterDescription; /** - *The type of client authentication used to connect to the Apache Kafka cluster. The value is NONE when no client authentication is used.
+ *The type of client authentication used to connect to the Apache Kafka cluster. The value + * is NONE when no client authentication is used.
*/ kafkaClusterClientAuthentication?: KafkaClusterClientAuthenticationDescription; @@ -1852,7 +1965,8 @@ export interface DescribeConnectorResponse { kafkaClusterEncryptionInTransit?: KafkaClusterEncryptionInTransitDescription; /** - *The version of Kafka Connect. It has to be compatible with both the Apache Kafka cluster's version and the plugins.
+ *The version of Kafka Connect. It has to be compatible with both the Apache Kafka + * cluster's version and the plugins.
*/ kafkaConnectVersion?: string; @@ -1867,7 +1981,8 @@ export interface DescribeConnectorResponse { plugins?: PluginDescription[]; /** - *The Amazon Resource Name (ARN) of the IAM role used by the connector to access Amazon Web Services resources.
+ *The Amazon Resource Name (ARN) of the IAM role used by the connector to access Amazon + * Web Services resources.
*/ serviceExecutionRoleArn?: string; @@ -1875,6 +1990,11 @@ export interface DescribeConnectorResponse { *Specifies which worker configuration was used for the connector.
*/ workerConfiguration?: WorkerConfigurationDescription; + + /** + *Details about the state of a connector.
+ */ + stateDescription?: StateDescription; } export namespace DescribeConnectorResponse { @@ -1883,6 +2003,7 @@ export namespace DescribeConnectorResponse { */ export const filterSensitiveLog = (obj: DescribeConnectorResponse): any => ({ ...obj, + ...(obj.connectorConfiguration && { connectorConfiguration: SENSITIVE_STRING }), }); } @@ -1924,7 +2045,8 @@ export interface DescribeCustomPluginResponse { description?: string; /** - *The latest successfully created revision of the custom plugin. If there are no successfully created revisions, this field will be absent.
+ *The latest successfully created revision of the custom plugin. If there are no + * successfully created revisions, this field will be absent.
*/ latestRevision?: CustomPluginRevisionSummary; @@ -1932,6 +2054,11 @@ export interface DescribeCustomPluginResponse { *The name of the custom plugin.
*/ name?: string; + + /** + *Details about the state of a custom plugin.
+ */ + stateDescription?: StateDescription; } export namespace DescribeCustomPluginResponse { @@ -1945,7 +2072,8 @@ export namespace DescribeCustomPluginResponse { export interface DescribeWorkerConfigurationRequest { /** - *The Amazon Resource Name (ARN) of the worker configuration that you want to get information about.
+ *The Amazon Resource Name (ARN) of the worker configuration that you want to get + * information about.
*/ workerConfigurationArn: string | undefined; } @@ -1990,6 +2118,7 @@ export namespace WorkerConfigurationRevisionDescription { */ export const filterSensitiveLog = (obj: WorkerConfigurationRevisionDescription): any => ({ ...obj, + ...(obj.propertiesFileContent && { propertiesFileContent: SENSITIVE_STRING }), }); } @@ -2026,6 +2155,9 @@ export namespace DescribeWorkerConfigurationResponse { */ export const filterSensitiveLog = (obj: DescribeWorkerConfigurationResponse): any => ({ ...obj, + ...(obj.latestRevision && { + latestRevision: WorkerConfigurationRevisionDescription.filterSensitiveLog(obj.latestRevision), + }), }); } @@ -2041,7 +2173,9 @@ export interface ListConnectorsRequest { maxResults?: number; /** - *If the response of a ListConnectors operation is truncated, it will include a NextToken. Send this NextToken in a subsequent request to continue listing from where the previous operation left off.
+ *If the response of a ListConnectors operation is truncated, it will include a NextToken. + * Send this NextToken in a subsequent request to continue listing from where the previous + * operation left off.
*/ nextToken?: string; } @@ -2062,7 +2196,9 @@ export interface ListConnectorsResponse { connectors?: ConnectorSummary[]; /** - *If the response of a ListConnectors operation is truncated, it will include a NextToken. Send this NextToken in a subsequent request to continue listing from where it left off.
+ *If the response of a ListConnectors operation is truncated, it will include a NextToken. + * Send this NextToken in a subsequent request to continue listing from where it left + * off.
*/ nextToken?: string; } @@ -2083,7 +2219,9 @@ export interface ListCustomPluginsRequest { maxResults?: number; /** - *If the response of a ListCustomPlugins operation is truncated, it will include a NextToken. Send this NextToken in a subsequent request to continue listing from where the previous operation left off.
+ *If the response of a ListCustomPlugins operation is truncated, it will include a + * NextToken. Send this NextToken in a subsequent request to continue listing from where the + * previous operation left off.
*/ nextToken?: string; } @@ -2104,7 +2242,9 @@ export interface ListCustomPluginsResponse { customPlugins?: CustomPluginSummary[]; /** - *If the response of a ListCustomPlugins operation is truncated, it will include a NextToken. Send this NextToken in a subsequent request to continue listing from where the previous operation left off.
+ *If the response of a ListCustomPlugins operation is truncated, it will include a + * NextToken. Send this NextToken in a subsequent request to continue listing from where the + * previous operation left off.
*/ nextToken?: string; } @@ -2125,7 +2265,9 @@ export interface ListWorkerConfigurationsRequest { maxResults?: number; /** - *If the response of a ListWorkerConfigurations operation is truncated, it will include a NextToken. Send this NextToken in a subsequent request to continue listing from where the previous operation left off.
+ *If the response of a ListWorkerConfigurations operation is truncated, it will include a + * NextToken. Send this NextToken in a subsequent request to continue listing from where the + * previous operation left off.
*/ nextToken?: string; } @@ -2141,7 +2283,9 @@ export namespace ListWorkerConfigurationsRequest { export interface ListWorkerConfigurationsResponse { /** - *If the response of a ListWorkerConfigurations operation is truncated, it will include a NextToken. Send this NextToken in a subsequent request to continue listing from where the previous operation left off.
+ *If the response of a ListWorkerConfigurations operation is truncated, it will include a + * NextToken. Send this NextToken in a subsequent request to continue listing from where the + * previous operation left off.
*/ nextToken?: string; diff --git a/clients/client-kafkaconnect/src/protocols/Aws_restJson1.ts b/clients/client-kafkaconnect/src/protocols/Aws_restJson1.ts index 94319d782cae..38f472510aae 100644 --- a/clients/client-kafkaconnect/src/protocols/Aws_restJson1.ts +++ b/clients/client-kafkaconnect/src/protocols/Aws_restJson1.ts @@ -23,6 +23,7 @@ import { CreateWorkerConfigurationCommandOutput, } from "../commands/CreateWorkerConfigurationCommand"; import { DeleteConnectorCommandInput, DeleteConnectorCommandOutput } from "../commands/DeleteConnectorCommand"; +import { DeleteCustomPluginCommandInput, DeleteCustomPluginCommandOutput } from "../commands/DeleteCustomPluginCommand"; import { DescribeConnectorCommandInput, DescribeConnectorCommandOutput } from "../commands/DescribeConnectorCommand"; import { DescribeCustomPluginCommandInput, @@ -90,6 +91,7 @@ import { ScaleOutPolicyDescription, ScaleOutPolicyUpdate, ServiceUnavailableException, + StateDescription, TooManyRequestsException, UnauthorizedException, Vpc, @@ -252,6 +254,35 @@ export const serializeAws_restJson1DeleteConnectorCommand = async ( }); }; +export const serializeAws_restJson1DeleteCustomPluginCommand = async ( + input: DeleteCustomPluginCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/v1/custom-plugins/{customPluginArn}"; + if (input.customPluginArn !== undefined) { + const labelValue: string = input.customPluginArn; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: customPluginArn."); + } + resolvedPath = resolvedPath.replace("{customPluginArn}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: customPluginArn."); + } + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "DELETE", + headers, + path: resolvedPath, + body, + }); +}; + export const serializeAws_restJson1DescribeConnectorCommand = async ( input: DescribeConnectorCommandInput, context: __SerdeContext @@ -746,6 +777,72 @@ const deserializeAws_restJson1DeleteConnectorCommandError = async ( } }; +export const deserializeAws_restJson1DeleteCustomPluginCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): PromiseProvides the configuration information of users or groups in + *
Provides the configuration information for users or groups in * your Amazon Web Services SSO identity source to grant access your Amazon Kendra * experience.
*/ @@ -406,7 +406,7 @@ export enum Persona { } /** - *Provides the configuration information of users or groups in your + *
Provides the configuration information for users or groups in your * Amazon Web Services SSO identity source for access to your Amazon Kendra experience. * Specific permissions are defined for each user or group once they are * granted access to your Amazon Kendra experience.
@@ -1844,7 +1844,7 @@ export enum ConfluenceVersion { } /** - *Provides information for connecting to an Amazon VPC.
+ *Provides the configuration information to connect to an Amazon VPC.
*/ export interface DataSourceVpcConfiguration { /** @@ -1873,8 +1873,8 @@ export namespace DataSourceVpcConfiguration { } /** - *Provides configuration information for data sources that connect - * to Confluence.
+ *Provides the configuration information to connect to Confluence + * as your data source.
*/ export interface ConfluenceConfiguration { /** @@ -2045,7 +2045,7 @@ export namespace ColumnConfiguration { } /** - *Provides the information necessary to connect to a + *
Provides the configuration information that's required to connect to a * database.
*/ export interface ConnectionConfiguration { @@ -2104,7 +2104,7 @@ export enum QueryIdentifiersEnclosingOption { } /** - *Provides information that configures Amazon Kendra to use a SQL + *
Provides the configuration information to use a SQL * database.
*/ export interface SqlConfiguration { @@ -2135,7 +2135,7 @@ export namespace SqlConfiguration { } /** - *Provides the information necessary to connect a database to an + *
Provides the configuration information to connect to a * index.
*/ export interface DatabaseConfiguration { @@ -2145,12 +2145,12 @@ export interface DatabaseConfiguration { DatabaseEngineType: DatabaseEngineType | string | undefined; /** - *The information necessary to connect to a database.
+ *Configuration information that's required to connect to a database.
*/ ConnectionConfiguration: ConnectionConfiguration | undefined; /** - *Provides information for connecting to an Amazon VPC.
+ *Provides the configuration information to connect to an Amazon VPC.
*/ VpcConfiguration?: DataSourceVpcConfiguration; @@ -2208,7 +2208,7 @@ export interface FsxConfiguration { FileSystemType: FsxFileSystemType | string | undefined; /** - *Provides the configuration information for connecting to an + *
Configuration information for connecting to an * Amazon Virtual Private Cloud for your Amazon FSx. Your Amazon FSx * instance must reside inside your VPC.
*/ @@ -2228,8 +2228,8 @@ export interface FsxConfiguration { * Amazon FSx file system for Windows. *password—The password of the active directory user with - * read and mounting access Amazon FSx Windows file system.
+ *password—The password of the Active Directory user account with + * read and mounting access to the Amazon FSx Windows file system.
*A list of DataSourceToIndexFieldMapping
objects that
* map Amazon FSx data source attributes or field names to Amazon Kendra
- * index field names in Amazon Kendra. To create custom fields, use the
+ * index field names. To create custom fields, use the
* UpdateIndex
API before you map to Amazon FSx fields.
* For more information, see Mapping
* data source fields. The Amazon FSx data source field names
@@ -2277,8 +2277,8 @@ export namespace FsxConfiguration {
}
/**
- *
Provides configuration information for data sources that connect - * to Google Drive.
+ *Provides the configuration information to connect to + * Google Drive as your data source.
*/ export interface GoogleDriveConfiguration { /** @@ -2382,8 +2382,8 @@ export namespace OneDriveUsers { } /** - *Provides configuration information for data sources that connect - * to OneDrive.
+ *Provides the configuration information to connect + * to OneDrive as your data source.
*/ export interface OneDriveConfiguration { /** @@ -2475,8 +2475,8 @@ export namespace DocumentsMetadataConfiguration { } /** - *Provides configuration information for a data source to index - * documents in an Amazon S3 bucket.
+ *Provides the configuration information to connect to + * an Amazon S3 bucket.
*/ export interface S3DataSourceConfiguration { /** @@ -2577,7 +2577,7 @@ export enum SalesforceChatterFeedIncludeFilterType { } /** - *Defines configuration for syncing a Salesforce chatter feed. The + *
The configuration information for syncing a Salesforce chatter feed. The * contents of the object comes from the Salesforce FeedItem * table.
*/ @@ -2666,7 +2666,7 @@ export enum SalesforceKnowledgeArticleState { } /** - *Provides configuration information for standard Salesforce + *
Configuration information for standard Salesforce * knowledge articles.
*/ export interface SalesforceStandardKnowledgeArticleTypeConfiguration { @@ -2699,7 +2699,7 @@ export namespace SalesforceStandardKnowledgeArticleTypeConfiguration { } /** - *Specifies configuration information for the knowledge article + *
Provides the configuration information for the knowledge article * types that Amazon Kendra indexes. Amazon Kendra indexes standard knowledge * articles and the standard fields of knowledge articles, or the * custom fields of custom knowledge articles, but not both
@@ -2713,13 +2713,13 @@ export interface SalesforceKnowledgeArticleConfiguration { IncludedStates: (SalesforceKnowledgeArticleState | string)[] | undefined; /** - *Provides configuration information for standard Salesforce + *
Configuration information for standard Salesforce * knowledge articles.
*/ StandardKnowledgeArticleTypeConfiguration?: SalesforceStandardKnowledgeArticleTypeConfiguration; /** - *Provides configuration information for custom Salesforce knowledge + *
Configuration information for custom Salesforce knowledge * articles.
*/ CustomKnowledgeArticleTypeConfigurations?: SalesforceCustomKnowledgeArticleTypeConfiguration[]; @@ -2735,7 +2735,7 @@ export namespace SalesforceKnowledgeArticleConfiguration { } /** - *Provides configuration information for processing attachments to + *
Provides the configuration information for processing attachments to * Salesforce standard objects.
*/ export interface SalesforceStandardObjectAttachmentConfiguration { @@ -2781,7 +2781,7 @@ export enum SalesforceStandardObjectName { } /** - *Specifies configuration information for indexing a single standard + *
Provides the configuration information for indexing a single standard * object.
*/ export interface SalesforceStandardObjectConfiguration { @@ -2891,7 +2891,7 @@ export interface SalesforceConfiguration { CrawlAttachments?: boolean; /** - *Provides configuration information for processing attachments to + *
Configuration information for processing attachments to * Salesforce standard objects.
*/ StandardObjectAttachmentConfiguration?: SalesforceStandardObjectAttachmentConfiguration; @@ -2932,7 +2932,7 @@ export enum ServiceNowAuthenticationType { } /** - *Provides configuration information for crawling knowledge articles + *
Provides the configuration information for crawling knowledge articles * in the ServiceNow site.
*/ export interface ServiceNowKnowledgeArticleConfiguration { @@ -2998,7 +2998,7 @@ export namespace ServiceNowKnowledgeArticleConfiguration { } /** - *Provides configuration information for crawling service catalog + *
Provides the configuration information for crawling service catalog * items in the ServiceNow site
*/ export interface ServiceNowServiceCatalogConfiguration { @@ -3062,8 +3062,8 @@ export enum ServiceNowBuildVersionType { } /** - *Provides configuration information required to connect to a - * ServiceNow data source.
+ *Provides the configuration information to connect to + * ServiceNow as your data source.
*/ export interface ServiceNowConfiguration { /** @@ -3089,13 +3089,13 @@ export interface ServiceNowConfiguration { ServiceNowBuildVersion: ServiceNowBuildVersionType | string | undefined; /** - *Provides configuration information for crawling knowledge articles + *
Configuration information for crawling knowledge articles * in the ServiceNow site.
*/ KnowledgeArticleConfiguration?: ServiceNowKnowledgeArticleConfiguration; /** - *Provides configuration information for crawling service catalogs + *
Configuration information for crawling service catalogs * in the ServiceNow site.
*/ ServiceCatalogConfiguration?: ServiceNowServiceCatalogConfiguration; @@ -3133,8 +3133,8 @@ export enum SharePointVersion { } /** - *Provides configuration information for connecting to a Microsoft - * SharePoint data source.
+ *Provides the configuration information to connect to Microsoft + * SharePoint as your data source.
*/ export interface SharePointConfiguration { /** @@ -3203,7 +3203,7 @@ export interface SharePointConfiguration { ExclusionPatterns?: string[]; /** - *Provides information for connecting to an Amazon VPC.
+ *Provides the configuration information to connect to an Amazon VPC.
*/ VpcConfiguration?: DataSourceVpcConfiguration; @@ -3294,7 +3294,7 @@ export enum WebCrawlerMode { } /** - *Provides the configuration information of the seed or starting point URLs to crawl.
+ *Provides the configuration information for the seed or starting point URLs to crawl.
** When selecting websites to index, you must adhere to * the Amazon Acceptable Use Policy @@ -3347,7 +3347,7 @@ export namespace SeedUrlConfiguration { } /** - *
Provides the configuration information of the sitemap URLs to crawl.
+ *Provides the configuration information for the sitemap URLs to crawl.
** When selecting websites to index, you must adhere to * the Amazon Acceptable Use Policy @@ -3389,7 +3389,7 @@ export namespace SiteMapsConfiguration { */ export interface Urls { /** - *
Provides the configuration of the seed or starting point URLs of the websites + *
Configuration of the seed or starting point URLs of the websites * you want to crawl.
*You can choose to crawl only the website host names, or the website host names * with subdomains, or the website host names with subdomains and other domains @@ -3399,7 +3399,7 @@ export interface Urls { SeedUrlConfiguration?: SeedUrlConfiguration; /** - *
Provides the configuration of the sitemap URLs of the websites you want to crawl.
+ *Configuration of the sitemap URLs of the websites you want to crawl.
*Only URLs belonging to the same website host names are crawled. You can list up to * three sitemap URLs.
*/ @@ -3487,7 +3487,7 @@ export interface WebCrawlerConfiguration { UrlExclusionPatterns?: string[]; /** - *Provides configuration information required to connect to your internal + *
Configuration information required to connect to your internal * websites via a web proxy.
*You must provide the website host name and port number. For example, the * host name of https://a.example.com/page1.html is "a.example.com" and the @@ -3499,7 +3499,7 @@ export interface WebCrawlerConfiguration { ProxyConfiguration?: ProxyConfiguration; /** - *
Provides configuration information required to connect to websites using + *
Configuration information required to connect to websites using * authentication.
*You can connect to websites using basic authentication of user name and password.
*You must provide the website host name and port number. For example, the host name @@ -3602,42 +3602,42 @@ export namespace WorkDocsConfiguration { } /** - *
Configuration information for an Amazon Kendra data source.
+ *Provides the configuration information for an Amazon Kendra data source.
*/ export interface DataSourceConfiguration { /** - *Provides information to create a data source connector for a - * document repository in an Amazon S3 bucket.
+ *Provides the configuration information to connect to an Amazon S3 + * bucket as your data source.
*/ S3Configuration?: S3DataSourceConfiguration; /** - *Provides information necessary to create a data source connector - * for a Microsoft SharePoint site.
+ *Provides the configuration information to connect to Microsoft SharePoint + * as your data source.
*/ SharePointConfiguration?: SharePointConfiguration; /** - *Provides information necessary to create a data source connector - * for a database.
+ *Provides the configuration information to connect to a database as + * your data source.
*/ DatabaseConfiguration?: DatabaseConfiguration; /** - *Provides configuration information for data sources that connect - * to a Salesforce site.
+ *Provides the configuration information to connect to + * Salesforce as your data source.
*/ SalesforceConfiguration?: SalesforceConfiguration; /** - *Provides configuration for data sources that connect to Microsoft - * OneDrive.
+ *Provides the configuration information to connect to Microsoft + * OneDrive as your data source.
*/ OneDriveConfiguration?: OneDriveConfiguration; /** - *Provides configuration for data sources that connect to ServiceNow - * instances.
+ *Provides the configuration information to connect to ServiceNow + * as your data source.
*/ ServiceNowConfiguration?: ServiceNowConfiguration; @@ -3648,8 +3648,8 @@ export interface DataSourceConfiguration { ConfluenceConfiguration?: ConfluenceConfiguration; /** - *Provides configuration for data sources that connect to Google - * Drive.
+ *Provides the configuration information to connect to Google + * Drive as your data source.
*/ GoogleDriveConfiguration?: GoogleDriveConfiguration; @@ -3743,7 +3743,7 @@ export interface CreateDataSourceRequest { Type: DataSourceType | string | undefined; /** - *The connector configuration information that is required to access the + *
Configuration information that is required to access the data source * repository.
*You can't specify the Configuration
parameter when the
* Type
parameter is set to CUSTOM
. If you do,
@@ -3842,7 +3842,7 @@ export namespace CreateDataSourceResponse {
}
/**
- *
Configuration information for your content sources, such as data sources, + *
Provides the configuration information for your content sources, such as data sources, * FAQs, and content indexed directly via BatchPutDocument.
*/ export interface ContentSourceConfiguration { @@ -3874,7 +3874,7 @@ export namespace ContentSourceConfiguration { } /** - *Configuration information for the identifiers of your users.
+ *Provides the configuration information for the identifiers of your users.
*/ export interface UserIdentityConfiguration { /** @@ -3899,7 +3899,7 @@ export namespace UserIdentityConfiguration { } /** - *Specifies the configuration information for your Amazon Kendra experience. This includes + *
Provides the configuration information for your Amazon Kendra experience. This includes * the data source IDs and/or FAQ IDs, and user or group information to grant access * to your Amazon Kendra experience.
*/ @@ -3947,7 +3947,7 @@ export interface CreateExperienceRequest { RoleArn?: string; /** - *Provides the configuration information for your Amazon Kendra experience. This includes + *
Configuration information for your Amazon Kendra experience. This includes
* ContentSourceConfiguration
, which specifies the data source IDs
* and/or FAQ IDs, and UserIdentityConfiguration
, which specifies the
* user or group information to grant access to your Amazon Kendra experience.
Configuration information for the JSON token type.
+ *Provides the configuration information for the JSON token type.
*/ export interface JsonTokenTypeConfiguration { /** @@ -4191,7 +4191,7 @@ export enum KeyLocation { } /** - *Configuration information for the JWT token type.
+ *Provides the configuration information for the JWT token type.
*/ export interface JwtTokenTypeConfiguration { /** @@ -4240,7 +4240,7 @@ export namespace JwtTokenTypeConfiguration { } /** - *Provides configuration information for a token configuration.
+ *Provides the configuration information for a token.
*/ export interface UserTokenConfiguration { /** @@ -4772,8 +4772,7 @@ export interface DescribeDataSourceResponse { Type?: DataSourceType | string; /** - *Information that describes where the data source is located and how - * the data source is configured. The specific information in the description + *
Describes how the data source is configured. The specific information in the description * depends on the data source provider.
*/ Configuration?: DataSourceConfiguration; @@ -4875,7 +4874,7 @@ export enum EndpointType { } /** - *Provides the configuration information of the endpoint for your Amazon Kendra + *
Provides the configuration information for the endpoint for your Amazon Kendra * experience.
*/ export interface ExperienceEndpoint { @@ -7265,7 +7264,7 @@ export namespace ListIndicesRequest { } /** - *A summary of information about an index.
+ *A summary of information on the configuration of an index.
*/ export interface IndexConfigurationSummary { /** @@ -7316,7 +7315,7 @@ export namespace IndexConfigurationSummary { export interface ListIndicesResponse { /** - *An array of summary information for one or more indexes.
+ *An array of summary information on the configuration of one or more indexes.
*/ IndexConfigurationSummaryItems?: IndexConfigurationSummary[]; @@ -7878,9 +7877,33 @@ export namespace SortingConfiguration { } /** - *- * Data source information for user context filtering. - *
+ *Provides the configuration information for suggested query spell corrections.
+ *Suggested spell corrections are based on words that appear in your indexed documents + * and how closely a corrected word matches a misspelled word.
+ *This feature is designed with certain defaults or limits. For information on the + * current limits and how to request more support for some limits, see the + * Spell + * Checker documentation.
+ */ +export interface SpellCorrectionConfiguration { + /** + *
+ * TRUE
to suggest spell corrections for queries.
Data source information for user context filtering.
*/ export interface DataSourceGroup { /** @@ -8132,6 +8155,66 @@ export namespace QueryResultItem { }); } +/** + *A corrected misspelled word in a query.
+ */ +export interface Correction { + /** + *The zero-based location in the response string or text where + * the corrected word starts.
+ */ + BeginOffset?: number; + + /** + *The zero-based location in the response string or text where + * the corrected word ends.
+ */ + EndOffset?: number; + + /** + *The string or text of a misspelled word in a query.
+ */ + Term?: string; + + /** + *The string or text of a corrected misspelled word in a query.
+ */ + CorrectedTerm?: string; +} + +export namespace Correction { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Correction): any => ({ + ...obj, + }); +} + +/** + *A query with suggested spell corrections.
+ */ +export interface SpellCorrectedQuery { + /** + *The query with the suggested spell corrections.
+ */ + SuggestedQueryText?: string; + + /** + *The corrected misspelled word or words in a query.
+ */ + Corrections?: Correction[]; +} + +export namespace SpellCorrectedQuery { + /** + * @internal + */ + export const filterSensitiveLog = (obj: SpellCorrectedQuery): any => ({ + ...obj, + }); +} + export enum WarningCode { QUERY_LANGUAGE_INVALID_SYNTAX = "QUERY_LANGUAGE_INVALID_SYNTAX", } @@ -8194,6 +8277,11 @@ export interface QueryResult { * with advanced query syntax. */ Warnings?: Warning[]; + + /** + *A list of information related to suggested spell corrections for a query.
+ */ + SpellCorrectedQueries?: SpellCorrectedQuery[]; } export namespace QueryResult { @@ -8469,7 +8557,7 @@ export interface UpdateDataSourceRequest { IndexId: string | undefined; /** - *Configuration information for an Amazon Kendra data source.
+ *Configuration information for an Amazon Kendra data source you want to update.
*/ Configuration?: DataSourceConfiguration; @@ -8543,8 +8631,7 @@ export interface UpdateExperienceRequest { RoleArn?: string; /** - *Provides the user configuration information. This includes the Amazon Web Services SSO - * field name that contains the identifiers of your users, such as their emails.
+ *Configuration information for your Amazon Kendra you want to update.
*/ Configuration?: ExperienceConfiguration; @@ -8586,7 +8673,7 @@ export interface UpdateIndexRequest { Description?: string; /** - *The document metadata to update.
+ *The document metadata you want to update.
*/ DocumentMetadataConfigurationUpdates?: DocumentMetadataConfiguration[]; @@ -8989,6 +9076,11 @@ export interface QueryRequest { * email address, as theVisitorId
.
*/
VisitorId?: string;
+
+ /**
+ * Enables suggested spell corrections for queries.
+ */ + SpellCorrectionConfiguration?: SpellCorrectionConfiguration; } export namespace QueryRequest { diff --git a/clients/client-kendra/src/protocols/Aws_json1_1.ts b/clients/client-kendra/src/protocols/Aws_json1_1.ts index 462c0d1c13e3..3cf8534a2521 100644 --- a/clients/client-kendra/src/protocols/Aws_json1_1.ts +++ b/clients/client-kendra/src/protocols/Aws_json1_1.ts @@ -185,6 +185,7 @@ import { ConfluenceSpaceToIndexFieldMapping, ConnectionConfiguration, ContentSourceConfiguration, + Correction, CreateDataSourceRequest, CreateDataSourceResponse, CreateExperienceRequest, @@ -338,6 +339,8 @@ import { SharePointConfiguration, SiteMapsConfiguration, SortingConfiguration, + SpellCorrectedQuery, + SpellCorrectionConfiguration, SqlConfiguration, StartDataSourceSyncJobRequest, StartDataSourceSyncJobResponse, @@ -6139,6 +6142,13 @@ const serializeAws_json1_1QueryRequest = (input: QueryRequest, context: __SerdeC input.SortingConfiguration !== null && { SortingConfiguration: serializeAws_json1_1SortingConfiguration(input.SortingConfiguration, context), }), + ...(input.SpellCorrectionConfiguration !== undefined && + input.SpellCorrectionConfiguration !== null && { + SpellCorrectionConfiguration: serializeAws_json1_1SpellCorrectionConfiguration( + input.SpellCorrectionConfiguration, + context + ), + }), ...(input.UserContext !== undefined && input.UserContext !== null && { UserContext: serializeAws_json1_1UserContext(input.UserContext, context) }), ...(input.VisitorId !== undefined && input.VisitorId !== null && { VisitorId: input.VisitorId }), @@ -6654,6 +6664,18 @@ const serializeAws_json1_1SortingConfiguration = (input: SortingConfiguration, c }; }; +const serializeAws_json1_1SpellCorrectionConfiguration = ( + input: SpellCorrectionConfiguration, + context: __SerdeContext +): any => { + return { + ...(input.IncludeQuerySpellCheckSuggestions !== undefined && + input.IncludeQuerySpellCheckSuggestions !== null && { + IncludeQuerySpellCheckSuggestions: 
input.IncludeQuerySpellCheckSuggestions, + }), + }; +}; + const serializeAws_json1_1SqlConfiguration = (input: SqlConfiguration, context: __SerdeContext): any => { return { ...(input.QueryIdentifiersEnclosingOption !== undefined && @@ -7580,6 +7602,27 @@ const deserializeAws_json1_1ContentSourceConfiguration = ( } as any; }; +const deserializeAws_json1_1Correction = (output: any, context: __SerdeContext): Correction => { + return { + BeginOffset: __expectInt32(output.BeginOffset), + CorrectedTerm: __expectString(output.CorrectedTerm), + EndOffset: __expectInt32(output.EndOffset), + Term: __expectString(output.Term), + } as any; +}; + +const deserializeAws_json1_1CorrectionList = (output: any, context: __SerdeContext): Correction[] => { + const retVal = (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_1Correction(entry, context); + }); + return retVal; +}; + const deserializeAws_json1_1CreateDataSourceResponse = ( output: any, context: __SerdeContext @@ -9052,6 +9095,10 @@ const deserializeAws_json1_1QueryResult = (output: any, context: __SerdeContext) output.ResultItems !== undefined && output.ResultItems !== null ? deserializeAws_json1_1QueryResultItemList(output.ResultItems, context) : undefined, + SpellCorrectedQueries: + output.SpellCorrectedQueries !== undefined && output.SpellCorrectedQueries !== null + ? 
deserializeAws_json1_1SpellCorrectedQueryList(output.SpellCorrectedQueries, context) + : undefined, TotalNumberOfResults: __expectInt32(output.TotalNumberOfResults), Warnings: output.Warnings !== undefined && output.Warnings !== null @@ -9662,6 +9709,28 @@ const deserializeAws_json1_1SnapshotsDataRecords = (output: any, context: __Serd return retVal; }; +const deserializeAws_json1_1SpellCorrectedQuery = (output: any, context: __SerdeContext): SpellCorrectedQuery => { + return { + Corrections: + output.Corrections !== undefined && output.Corrections !== null + ? deserializeAws_json1_1CorrectionList(output.Corrections, context) + : undefined, + SuggestedQueryText: __expectString(output.SuggestedQueryText), + } as any; +}; + +const deserializeAws_json1_1SpellCorrectedQueryList = (output: any, context: __SerdeContext): SpellCorrectedQuery[] => { + const retVal = (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_1SpellCorrectedQuery(entry, context); + }); + return retVal; +}; + const deserializeAws_json1_1SqlConfiguration = (output: any, context: __SerdeContext): SqlConfiguration => { return { QueryIdentifiersEnclosingOption: __expectString(output.QueryIdentifiersEnclosingOption), diff --git a/clients/client-keyspaces/.gitignore b/clients/client-keyspaces/.gitignore new file mode 100644 index 000000000000..54f14c9aef25 --- /dev/null +++ b/clients/client-keyspaces/.gitignore @@ -0,0 +1,9 @@ +/node_modules/ +/build/ +/coverage/ +/docs/ +/dist-* +*.tsbuildinfo +*.tgz +*.log +package-lock.json diff --git a/clients/client-keyspaces/LICENSE b/clients/client-keyspaces/LICENSE new file mode 100644 index 000000000000..8efcd8d5c5b7 --- /dev/null +++ b/clients/client-keyspaces/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2018-2022 Amazon.com, Inc. or its affiliates. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/clients/client-keyspaces/README.md b/clients/client-keyspaces/README.md new file mode 100644 index 000000000000..7353f4ad36cc --- /dev/null +++ b/clients/client-keyspaces/README.md @@ -0,0 +1,219 @@ +# @aws-sdk/client-keyspaces + +[![NPM version](https://img.shields.io/npm/v/@aws-sdk/client-keyspaces/latest.svg)](https://www.npmjs.com/package/@aws-sdk/client-keyspaces) +[![NPM downloads](https://img.shields.io/npm/dm/@aws-sdk/client-keyspaces.svg)](https://www.npmjs.com/package/@aws-sdk/client-keyspaces) + +## Description + +AWS SDK for JavaScript Keyspaces Client for Node.js, Browser and React Native. + +Amazon Keyspaces (for Apache Cassandra) is a scalable, +highly available, and managed Apache Cassandra-compatible database service. 
Amazon Keyspaces makes it easy to migrate, +run, and scale Cassandra workloads in the Amazon Web Services Cloud. With just a few clicks on the Amazon Web Services Management Console or a few lines of code, +you can create keyspaces and tables in Amazon Keyspaces, without deploying any infrastructure or installing software.
+ +In addition to supporting Cassandra Query Language (CQL) requests via open-source Cassandra drivers, +Amazon Keyspaces supports data definition language (DDL) operations to manage keyspaces and tables using the Amazon Web Services SDK and CLI. This API reference describes +the supported DDL operations in detail.
+ +For the list of all supported CQL APIs, see Supported Cassandra APIs, operations, and data types +in Amazon Keyspaces in the Amazon Keyspaces Developer +Guide.
+ +To learn how Amazon Keyspaces API actions are tracked in CloudTrail, see Amazon Keyspaces information in CloudTrail in the Amazon Keyspaces Developer +Guide.
+ +For more information about Amazon Web Services APIs, for example how to implement retry logic or how to sign Amazon Web Services API requests, see Amazon Web Services APIs in the General Reference.
+ +## Installing + +To install the this package, simply type add or install @aws-sdk/client-keyspaces +using your favorite package manager: + +- `npm install @aws-sdk/client-keyspaces` +- `yarn add @aws-sdk/client-keyspaces` +- `pnpm add @aws-sdk/client-keyspaces` + +## Getting Started + +### Import + +The AWS SDK is modulized by clients and commands. +To send a request, you only need to import the `KeyspacesClient` and +the commands you need, for example `CreateKeyspaceCommand`: + +```js +// ES5 example +const { KeyspacesClient, CreateKeyspaceCommand } = require("@aws-sdk/client-keyspaces"); +``` + +```ts +// ES6+ example +import { KeyspacesClient, CreateKeyspaceCommand } from "@aws-sdk/client-keyspaces"; +``` + +### Usage + +To send a request, you: + +- Initiate client with configuration (e.g. credentials, region). +- Initiate command with input parameters. +- Call `send` operation on client with command object as input. +- If you are using a custom http handler, you may call `destroy()` to close open connections. + +```js +// a client can be shared by different commands. +const client = new KeyspacesClient({ region: "REGION" }); + +const params = { + /** input parameters */ +}; +const command = new CreateKeyspaceCommand(params); +``` + +#### Async/await + +We recommend using [await](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/await) +operator to wait for the promise returned by send operation as follows: + +```js +// async/await. +try { + const data = await client.send(command); + // process data. +} catch (error) { + // error handling. +} finally { + // finally. +} +``` + +Async-await is clean, concise, intuitive, easy to debug and has better error handling +as compared to using Promise chains or callbacks. + +#### Promises + +You can also use [Promise chaining](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Using_promises#chaining) +to execute send operation. 
+ +```js +client.send(command).then( + (data) => { + // process data. + }, + (error) => { + // error handling. + } +); +``` + +Promises can also be called using `.catch()` and `.finally()` as follows: + +```js +client + .send(command) + .then((data) => { + // process data. + }) + .catch((error) => { + // error handling. + }) + .finally(() => { + // finally. + }); +``` + +#### Callbacks + +We do not recommend using callbacks because of [callback hell](http://callbackhell.com/), +but they are supported by the send operation. + +```js +// callbacks. +client.send(command, (err, data) => { + // proccess err and data. +}); +``` + +#### v2 compatible style + +The client can also send requests using v2 compatible style. +However, it results in a bigger bundle size and may be dropped in next major version. More details in the blog post +on [modular packages in AWS SDK for JavaScript](https://aws.amazon.com/blogs/developer/modular-packages-in-aws-sdk-for-javascript/) + +```ts +import * as AWS from "@aws-sdk/client-keyspaces"; +const client = new AWS.Keyspaces({ region: "REGION" }); + +// async/await. +try { + const data = await client.createKeyspace(params); + // process data. +} catch (error) { + // error handling. +} + +// Promises. +client + .createKeyspace(params) + .then((data) => { + // process data. + }) + .catch((error) => { + // error handling. + }); + +// callbacks. +client.createKeyspace(params, (err, data) => { + // proccess err and data. +}); +``` + +### Troubleshooting + +When the service returns an exception, the error will include the exception information, +as well as response metadata (e.g. request id). + +```js +try { + const data = await client.send(command); + // process data. +} catch (error) { + const { requestId, cfId, extendedRequestId } = error.$metadata; + console.log({ requestId, cfId, extendedRequestId }); + /** + * The keys within exceptions are also parsed. 
+ * You can access them by specifying exception names: + * if (error.name === 'SomeServiceException') { + * const value = error.specialKeyInException; + * } + */ +} +``` + +## Getting Help + +Please use these community resources for getting help. +We use the GitHub issues for tracking bugs and feature requests, but have limited bandwidth to address them. + +- Visit [Developer Guide](https://docs.aws.amazon.com/sdk-for-javascript/v3/developer-guide/welcome.html) + or [API Reference](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/index.html). +- Check out the blog posts tagged with [`aws-sdk-js`](https://aws.amazon.com/blogs/developer/tag/aws-sdk-js/) + on AWS Developer Blog. +- Ask a question on [StackOverflow](https://stackoverflow.com/questions/tagged/aws-sdk-js) and tag it with `aws-sdk-js`. +- Join the AWS JavaScript community on [gitter](https://gitter.im/aws/aws-sdk-js-v3). +- If it turns out that you may have found a bug, please [open an issue](https://github.com/aws/aws-sdk-js-v3/issues/new/choose). + +To test your universal JavaScript code in Node.js, browser and react-native environments, +visit our [code samples repo](https://github.com/aws-samples/aws-sdk-js-tests). + +## Contributing + +This client code is generated automatically. Any modifications will be overwritten the next time the `@aws-sdk/client-keyspaces` package is updated. +To contribute to client you can check our [generate clients scripts](https://github.com/aws/aws-sdk-js-v3/tree/main/scripts/generate-clients). + +## License + +This SDK is distributed under the +[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0), +see LICENSE for more information. 
diff --git a/clients/client-keyspaces/package.json b/clients/client-keyspaces/package.json new file mode 100644 index 000000000000..7b2445184587 --- /dev/null +++ b/clients/client-keyspaces/package.json @@ -0,0 +1,93 @@ +{ + "name": "@aws-sdk/client-keyspaces", + "description": "AWS SDK for JavaScript Keyspaces Client for Node.js, Browser and React Native", + "version": "3.0.0", + "scripts": { + "build": "concurrently 'yarn:build:cjs' 'yarn:build:es' 'yarn:build:types'", + "build:cjs": "tsc -p tsconfig.cjs.json", + "build:docs": "typedoc", + "build:es": "tsc -p tsconfig.es.json", + "build:types": "tsc -p tsconfig.types.json", + "build:types:downlevel": "downlevel-dts dist-types dist-types/ts3.4", + "clean": "rimraf ./dist-* && rimraf *.tsbuildinfo" + }, + "main": "./dist-cjs/index.js", + "types": "./dist-types/index.d.ts", + "module": "./dist-es/index.js", + "sideEffects": false, + "dependencies": { + "@aws-crypto/sha256-browser": "2.0.0", + "@aws-crypto/sha256-js": "2.0.0", + "@aws-sdk/client-sts": "*", + "@aws-sdk/config-resolver": "*", + "@aws-sdk/credential-provider-node": "*", + "@aws-sdk/fetch-http-handler": "*", + "@aws-sdk/hash-node": "*", + "@aws-sdk/invalid-dependency": "*", + "@aws-sdk/middleware-content-length": "*", + "@aws-sdk/middleware-host-header": "*", + "@aws-sdk/middleware-logger": "*", + "@aws-sdk/middleware-retry": "*", + "@aws-sdk/middleware-serde": "*", + "@aws-sdk/middleware-signing": "*", + "@aws-sdk/middleware-stack": "*", + "@aws-sdk/middleware-user-agent": "*", + "@aws-sdk/node-config-provider": "*", + "@aws-sdk/node-http-handler": "*", + "@aws-sdk/protocol-http": "*", + "@aws-sdk/smithy-client": "*", + "@aws-sdk/types": "*", + "@aws-sdk/url-parser": "*", + "@aws-sdk/util-base64-browser": "*", + "@aws-sdk/util-base64-node": "*", + "@aws-sdk/util-body-length-browser": "*", + "@aws-sdk/util-body-length-node": "*", + "@aws-sdk/util-defaults-mode-browser": "*", + "@aws-sdk/util-defaults-mode-node": "*", + "@aws-sdk/util-user-agent-browser": 
"*", + "@aws-sdk/util-user-agent-node": "*", + "@aws-sdk/util-utf8-browser": "*", + "@aws-sdk/util-utf8-node": "*", + "tslib": "^2.3.0" + }, + "devDependencies": { + "@aws-sdk/service-client-documentation-generator": "*", + "@tsconfig/recommended": "1.0.1", + "@types/node": "^12.7.5", + "concurrently": "7.0.0", + "downlevel-dts": "0.7.0", + "rimraf": "3.0.2", + "typedoc": "0.19.2", + "typescript": "~4.3.5" + }, + "engines": { + "node": ">=12.0.0" + }, + "typesVersions": { + "<4.0": { + "dist-types/*": [ + "dist-types/ts3.4/*" + ] + } + }, + "files": [ + "dist-*" + ], + "author": { + "name": "AWS SDK for JavaScript Team", + "url": "https://aws.amazon.com/javascript/" + }, + "license": "Apache-2.0", + "browser": { + "./dist-es/runtimeConfig": "./dist-es/runtimeConfig.browser" + }, + "react-native": { + "./dist-es/runtimeConfig": "./dist-es/runtimeConfig.native" + }, + "homepage": "https://github.com/aws/aws-sdk-js-v3/tree/main/clients/client-keyspaces", + "repository": { + "type": "git", + "url": "https://github.com/aws/aws-sdk-js-v3.git", + "directory": "clients/client-keyspaces" + } +} diff --git a/clients/client-keyspaces/src/Keyspaces.ts b/clients/client-keyspaces/src/Keyspaces.ts new file mode 100644 index 000000000000..450698616777 --- /dev/null +++ b/clients/client-keyspaces/src/Keyspaces.ts @@ -0,0 +1,505 @@ +import { HttpHandlerOptions as __HttpHandlerOptions } from "@aws-sdk/types"; + +import { + CreateKeyspaceCommand, + CreateKeyspaceCommandInput, + CreateKeyspaceCommandOutput, +} from "./commands/CreateKeyspaceCommand"; +import { CreateTableCommand, CreateTableCommandInput, CreateTableCommandOutput } from "./commands/CreateTableCommand"; +import { + DeleteKeyspaceCommand, + DeleteKeyspaceCommandInput, + DeleteKeyspaceCommandOutput, +} from "./commands/DeleteKeyspaceCommand"; +import { DeleteTableCommand, DeleteTableCommandInput, DeleteTableCommandOutput } from "./commands/DeleteTableCommand"; +import { GetKeyspaceCommand, GetKeyspaceCommandInput, 
GetKeyspaceCommandOutput } from "./commands/GetKeyspaceCommand"; +import { GetTableCommand, GetTableCommandInput, GetTableCommandOutput } from "./commands/GetTableCommand"; +import { + ListKeyspacesCommand, + ListKeyspacesCommandInput, + ListKeyspacesCommandOutput, +} from "./commands/ListKeyspacesCommand"; +import { ListTablesCommand, ListTablesCommandInput, ListTablesCommandOutput } from "./commands/ListTablesCommand"; +import { + ListTagsForResourceCommand, + ListTagsForResourceCommandInput, + ListTagsForResourceCommandOutput, +} from "./commands/ListTagsForResourceCommand"; +import { + RestoreTableCommand, + RestoreTableCommandInput, + RestoreTableCommandOutput, +} from "./commands/RestoreTableCommand"; +import { TagResourceCommand, TagResourceCommandInput, TagResourceCommandOutput } from "./commands/TagResourceCommand"; +import { + UntagResourceCommand, + UntagResourceCommandInput, + UntagResourceCommandOutput, +} from "./commands/UntagResourceCommand"; +import { UpdateTableCommand, UpdateTableCommandInput, UpdateTableCommandOutput } from "./commands/UpdateTableCommand"; +import { KeyspacesClient } from "./KeyspacesClient"; + +/** + *Amazon Keyspaces (for Apache Cassandra) is a scalable, + * highly available, and managed Apache Cassandra-compatible database service. Amazon Keyspaces makes it easy to migrate, + * run, and scale Cassandra workloads in the Amazon Web Services Cloud. With just a few clicks on the Amazon Web Services Management Console or a few lines of code, + * you can create keyspaces and tables in Amazon Keyspaces, without deploying any infrastructure or installing software.
+ * + *In addition to supporting Cassandra Query Language (CQL) requests via open-source Cassandra drivers, + * Amazon Keyspaces supports data definition language (DDL) operations to manage keyspaces and tables using the Amazon Web Services SDK and CLI. This API reference describes + * the supported DDL operations in detail.
+ * + *For the list of all supported CQL APIs, see Supported Cassandra APIs, operations, and data types + * in Amazon Keyspaces in the Amazon Keyspaces Developer + * Guide.
+ * + *To learn how Amazon Keyspaces API actions are tracked in CloudTrail, see Amazon Keyspaces information in CloudTrail in the Amazon Keyspaces Developer + * Guide.
+ * + *For more information about Amazon Web Services APIs, for example how to implement retry logic or how to sign Amazon Web Services API requests, see Amazon Web Services APIs in the General Reference.
+ */ +export class Keyspaces extends KeyspacesClient { + /** + *The CreateKeyspace
operation adds a new keyspace to your account. In an Amazon Web Services account, keyspace names
+ * must be unique within each Region.
+ * CreateKeyspace
is an asynchronous operation. You can monitor the creation status of the new keyspace
+ * by using the GetKeyspace
operation.
For more information, see Creating keyspaces in the Amazon Keyspaces Developer + * Guide.
+ */ + public createKeyspace( + args: CreateKeyspaceCommandInput, + options?: __HttpHandlerOptions + ): PromiseThe CreateTable
operation adds a new table to the specified keyspace. Within a keyspace, table names
+ * must be unique.
+ * CreateTable
is an asynchronous operation. When the request is received, the status of the table is set to CREATING
.
+ * You can monitor the creation status of the new table by using the GetTable
+ * operation, which returns the current status
of the table. You can start using a table when the status is ACTIVE
.
For more information, see Creating tables in the Amazon Keyspaces Developer + * Guide.
+ */ + public createTable(args: CreateTableCommandInput, options?: __HttpHandlerOptions): PromiseThe DeleteKeyspace
operation deletes a keyspace and all of its tables.
+ * The DeleteTable
operation deletes a table and all of its data. After a DeleteTable
request is received,
+ * the specified table is in the DELETING
state until Amazon Keyspaces completes the deletion. If the table
+ * is in the ACTIVE
state, you can delete it. If a table is either in the CREATING
or UPDATING
states, then
+ * Amazon Keyspaces returns a ResourceInUseException
. If the specified table does not exist, Amazon Keyspaces returns
+ * a ResourceNotFoundException
. If the table is already in the DELETING
state, no error is returned.
Returns the name and the Amazon Resource Name (ARN) of the specified table.
+ */ + public getKeyspace(args: GetKeyspaceCommandInput, options?: __HttpHandlerOptions): PromiseReturns information about the table, including the table's name and current status, the keyspace name, + * configuration settings, and metadata.
+ *To read table metadata using GetTable
, Select
action
+ * permissions for the table and system tables are required to complete the operation.
Returns a list of keyspaces.
+ */ + public listKeyspaces( + args: ListKeyspacesCommandInput, + options?: __HttpHandlerOptions + ): PromiseReturns a list of tables for a specified keyspace.
+ */ + public listTables(args: ListTablesCommandInput, options?: __HttpHandlerOptions): PromiseReturns a list of all tags associated with the specified Amazon Keyspaces resource.
+ */ + public listTagsForResource( + args: ListTagsForResourceCommandInput, + options?: __HttpHandlerOptions + ): PromiseRestores the specified table to the specified point in time within the
+ * earliest_restorable_timestamp
and the current time. For more information about restore points, see
+ *
+ * Time window for PITR continuous backups in the Amazon Keyspaces Developer Guide.
+ *
+ * Any number of users can execute up to 4 concurrent restores (any type of restore) in a given account.
+ *When you restore using point in time recovery, Amazon Keyspaces restores your source table's schema and data to the state
+ * based on the selected timestamp (day:hour:minute:second)
to a new table. The Time to Live (TTL) settings
+ * are also restored to the state based on the selected timestamp.
In addition to the table's schema, data, and TTL settings, RestoreTable
restores the capacity mode, encryption, and
+ * point-in-time recovery settings from the source table.
+ * Unlike the table's schema data and TTL settings, which are restored based on the selected timestamp,
+ * these settings are always restored based on the table's settings as of the current time or when the table was deleted.
You can also overwrite these settings during restore:
+ *Read/write capacity mode
+ *Provisioned throughput capacity settings
+ *Point-in-time (PITR) settings
+ *Tags
+ *For more information, see PITR restore settings in the Amazon Keyspaces Developer + * Guide.
+ *The following settings are not restored, and you must configure them manually for the + * new table.
+ *Automatic scaling policies (for tables that use provisioned capacity + * mode)
+ *Identity and Access Management (IAM) policies
+ *Amazon CloudWatch metrics and alarms
+ *Associates a set of tags with a Amazon Keyspaces resource. You can then + * activate these user-defined tags so that they appear on the Cost Management Console for cost allocation tracking. + * For more information, see Adding tags and labels to Amazon Keyspaces resources in the Amazon Keyspaces Developer + * Guide.
+ *For IAM policy examples that show how to control access to Amazon Keyspaces resources based on tags, + * see Amazon Keyspaces resource access based on tags + * in the Amazon Keyspaces Developer Guide.
+ */ + public tagResource(args: TagResourceCommandInput, options?: __HttpHandlerOptions): PromiseRemoves the association of tags from a Amazon Keyspaces resource.
+ */ + public untagResource( + args: UntagResourceCommandInput, + options?: __HttpHandlerOptions + ): PromiseAdds new columns to the table or updates one of the table's settings, for example + * capacity mode, encryption, point-in-time recovery, or ttl settings. + * Note that you can only update one specific table setting per update operation.
+ */ + public updateTable(args: UpdateTableCommandInput, options?: __HttpHandlerOptions): PromiseAmazon Keyspaces (for Apache Cassandra) is a scalable, + * highly available, and managed Apache Cassandra-compatible database service. Amazon Keyspaces makes it easy to migrate, + * run, and scale Cassandra workloads in the Amazon Web Services Cloud. With just a few clicks on the Amazon Web Services Management Console or a few lines of code, + * you can create keyspaces and tables in Amazon Keyspaces, without deploying any infrastructure or installing software.
+ * + *In addition to supporting Cassandra Query Language (CQL) requests via open-source Cassandra drivers, + * Amazon Keyspaces supports data definition language (DDL) operations to manage keyspaces and tables using the Amazon Web Services SDK and CLI. This API reference describes + * the supported DDL operations in detail.
+ * + *For the list of all supported CQL APIs, see Supported Cassandra APIs, operations, and data types + * in Amazon Keyspaces in the Amazon Keyspaces Developer + * Guide.
+ * + *To learn how Amazon Keyspaces API actions are tracked in CloudTrail, see Amazon Keyspaces information in CloudTrail in the Amazon Keyspaces Developer + * Guide.
+ * + *For more information about Amazon Web Services APIs, for example how to implement retry logic or how to sign Amazon Web Services API requests, see Amazon Web Services APIs in the General Reference.
+ */ +export class KeyspacesClient extends __Client< + __HttpHandlerOptions, + ServiceInputTypes, + ServiceOutputTypes, + KeyspacesClientResolvedConfig +> { + /** + * The resolved configuration of KeyspacesClient class. This is resolved and normalized from the {@link KeyspacesClientConfig | constructor configuration interface}. + */ + readonly config: KeyspacesClientResolvedConfig; + + constructor(configuration: KeyspacesClientConfig) { + const _config_0 = __getRuntimeConfig(configuration); + const _config_1 = resolveRegionConfig(_config_0); + const _config_2 = resolveEndpointsConfig(_config_1); + const _config_3 = resolveRetryConfig(_config_2); + const _config_4 = resolveHostHeaderConfig(_config_3); + const _config_5 = resolveAwsAuthConfig(_config_4); + const _config_6 = resolveUserAgentConfig(_config_5); + super(_config_6); + this.config = _config_6; + this.middlewareStack.use(getRetryPlugin(this.config)); + this.middlewareStack.use(getContentLengthPlugin(this.config)); + this.middlewareStack.use(getHostHeaderPlugin(this.config)); + this.middlewareStack.use(getLoggerPlugin(this.config)); + this.middlewareStack.use(getAwsAuthPlugin(this.config)); + this.middlewareStack.use(getUserAgentPlugin(this.config)); + } + + /** + * Destroy underlying resources, like sockets. It's usually not necessary to do this. + * However in Node.js, it's best to explicitly shut down the client's agent when it is no longer needed. + * Otherwise, sockets might stay open for quite a long time before the server terminates them. 
+ */ + destroy(): void { + super.destroy(); + } +} diff --git a/clients/client-keyspaces/src/commands/CreateKeyspaceCommand.ts b/clients/client-keyspaces/src/commands/CreateKeyspaceCommand.ts new file mode 100644 index 000000000000..0511e489171a --- /dev/null +++ b/clients/client-keyspaces/src/commands/CreateKeyspaceCommand.ts @@ -0,0 +1,101 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { KeyspacesClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../KeyspacesClient"; +import { CreateKeyspaceRequest, CreateKeyspaceResponse } from "../models/models_0"; +import { + deserializeAws_json1_0CreateKeyspaceCommand, + serializeAws_json1_0CreateKeyspaceCommand, +} from "../protocols/Aws_json1_0"; + +export interface CreateKeyspaceCommandInput extends CreateKeyspaceRequest {} +export interface CreateKeyspaceCommandOutput extends CreateKeyspaceResponse, __MetadataBearer {} + +/** + *The CreateKeyspace
operation adds a new keyspace to your account. In an Amazon Web Services account, keyspace names
+ * must be unique within each Region.
+ * CreateKeyspace
is an asynchronous operation. You can monitor the creation status of the new keyspace
+ * by using the GetKeyspace
operation.
For more information, see Creating keyspaces in the Amazon Keyspaces Developer + * Guide.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { KeyspacesClient, CreateKeyspaceCommand } from "@aws-sdk/client-keyspaces"; // ES Modules import + * // const { KeyspacesClient, CreateKeyspaceCommand } = require("@aws-sdk/client-keyspaces"); // CommonJS import + * const client = new KeyspacesClient(config); + * const command = new CreateKeyspaceCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CreateKeyspaceCommandInput} for command's `input` shape. + * @see {@link CreateKeyspaceCommandOutput} for command's `response` shape. + * @see {@link KeyspacesClientResolvedConfig | config} for KeyspacesClient's `config` shape. + * + */ +export class CreateKeyspaceCommand extends $Command< + CreateKeyspaceCommandInput, + CreateKeyspaceCommandOutput, + KeyspacesClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CreateKeyspaceCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackThe CreateTable
operation adds a new table to the specified keyspace. Within a keyspace, table names
+ * must be unique.
+ * CreateTable
is an asynchronous operation. When the request is received, the status of the table is set to CREATING
.
+ * You can monitor the creation status of the new table by using the GetTable
+ * operation, which returns the current status
of the table. You can start using a table when the status is ACTIVE
.
For more information, see Creating tables in the Amazon Keyspaces Developer + * Guide.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { KeyspacesClient, CreateTableCommand } from "@aws-sdk/client-keyspaces"; // ES Modules import + * // const { KeyspacesClient, CreateTableCommand } = require("@aws-sdk/client-keyspaces"); // CommonJS import + * const client = new KeyspacesClient(config); + * const command = new CreateTableCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CreateTableCommandInput} for command's `input` shape. + * @see {@link CreateTableCommandOutput} for command's `response` shape. + * @see {@link KeyspacesClientResolvedConfig | config} for KeyspacesClient's `config` shape. + * + */ +export class CreateTableCommand extends $Command< + CreateTableCommandInput, + CreateTableCommandOutput, + KeyspacesClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CreateTableCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackThe DeleteKeyspace
operation deletes a keyspace and all of its tables.
+ * The DeleteTable
operation deletes a table and all of its data. After a DeleteTable
request is received,
+ * the specified table is in the DELETING
state until Amazon Keyspaces completes the deletion. If the table
+ * is in the ACTIVE
state, you can delete it. If a table is either in the CREATING
or UPDATING
states, then
+ * Amazon Keyspaces returns a ResourceInUseException
. If the specified table does not exist, Amazon Keyspaces returns
+ * a ResourceNotFoundException
. If the table is already in the DELETING
state, no error is returned.
Returns the name and the Amazon Resource Name (ARN) of the specified table.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { KeyspacesClient, GetKeyspaceCommand } from "@aws-sdk/client-keyspaces"; // ES Modules import + * // const { KeyspacesClient, GetKeyspaceCommand } = require("@aws-sdk/client-keyspaces"); // CommonJS import + * const client = new KeyspacesClient(config); + * const command = new GetKeyspaceCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetKeyspaceCommandInput} for command's `input` shape. + * @see {@link GetKeyspaceCommandOutput} for command's `response` shape. + * @see {@link KeyspacesClientResolvedConfig | config} for KeyspacesClient's `config` shape. + * + */ +export class GetKeyspaceCommand extends $Command< + GetKeyspaceCommandInput, + GetKeyspaceCommandOutput, + KeyspacesClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetKeyspaceCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackReturns information about the table, including the table's name and current status, the keyspace name, + * configuration settings, and metadata.
+ *To read table metadata using GetTable
, Select
action
+ * permissions for the table and system tables are required to complete the operation.
Returns a list of keyspaces.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { KeyspacesClient, ListKeyspacesCommand } from "@aws-sdk/client-keyspaces"; // ES Modules import + * // const { KeyspacesClient, ListKeyspacesCommand } = require("@aws-sdk/client-keyspaces"); // CommonJS import + * const client = new KeyspacesClient(config); + * const command = new ListKeyspacesCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListKeyspacesCommandInput} for command's `input` shape. + * @see {@link ListKeyspacesCommandOutput} for command's `response` shape. + * @see {@link KeyspacesClientResolvedConfig | config} for KeyspacesClient's `config` shape. + * + */ +export class ListKeyspacesCommand extends $Command< + ListKeyspacesCommandInput, + ListKeyspacesCommandOutput, + KeyspacesClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListKeyspacesCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackReturns a list of tables for a specified keyspace.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { KeyspacesClient, ListTablesCommand } from "@aws-sdk/client-keyspaces"; // ES Modules import + * // const { KeyspacesClient, ListTablesCommand } = require("@aws-sdk/client-keyspaces"); // CommonJS import + * const client = new KeyspacesClient(config); + * const command = new ListTablesCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListTablesCommandInput} for command's `input` shape. + * @see {@link ListTablesCommandOutput} for command's `response` shape. + * @see {@link KeyspacesClientResolvedConfig | config} for KeyspacesClient's `config` shape. + * + */ +export class ListTablesCommand extends $Command< + ListTablesCommandInput, + ListTablesCommandOutput, + KeyspacesClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListTablesCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackReturns a list of all tags associated with the specified Amazon Keyspaces resource.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { KeyspacesClient, ListTagsForResourceCommand } from "@aws-sdk/client-keyspaces"; // ES Modules import + * // const { KeyspacesClient, ListTagsForResourceCommand } = require("@aws-sdk/client-keyspaces"); // CommonJS import + * const client = new KeyspacesClient(config); + * const command = new ListTagsForResourceCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListTagsForResourceCommandInput} for command's `input` shape. + * @see {@link ListTagsForResourceCommandOutput} for command's `response` shape. + * @see {@link KeyspacesClientResolvedConfig | config} for KeyspacesClient's `config` shape. + * + */ +export class ListTagsForResourceCommand extends $Command< + ListTagsForResourceCommandInput, + ListTagsForResourceCommandOutput, + KeyspacesClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListTagsForResourceCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackRestores the specified table to the specified point in time within the
+ * earliest_restorable_timestamp
and the current time. For more information about restore points, see
+ *
+ * Time window for PITR continuous backups in the Amazon Keyspaces Developer Guide.
+ *
+ * Any number of users can execute up to 4 concurrent restores (any type of restore) in a given account.
+ *When you restore using point in time recovery, Amazon Keyspaces restores your source table's schema and data to the state
+ * based on the selected timestamp (day:hour:minute:second)
to a new table. The Time to Live (TTL) settings
+ * are also restored to the state based on the selected timestamp.
In addition to the table's schema, data, and TTL settings, RestoreTable
restores the capacity mode, encryption, and
+ * point-in-time recovery settings from the source table.
+ * Unlike the table's schema data and TTL settings, which are restored based on the selected timestamp,
+ * these settings are always restored based on the table's settings as of the current time or when the table was deleted.
You can also overwrite these settings during restore:
+ *Read/write capacity mode
+ *Provisioned throughput capacity settings
+ *Point-in-time (PITR) settings
+ *Tags
+ *For more information, see PITR restore settings in the Amazon Keyspaces Developer + * Guide.
+ *The following settings are not restored, and you must configure them manually for the + * new table.
+ *Automatic scaling policies (for tables that use provisioned capacity + * mode)
+ *Identity and Access Management (IAM) policies
+ *Amazon CloudWatch metrics and alarms
+ *Associates a set of tags with a Amazon Keyspaces resource. You can then + * activate these user-defined tags so that they appear on the Cost Management Console for cost allocation tracking. + * For more information, see Adding tags and labels to Amazon Keyspaces resources in the Amazon Keyspaces Developer + * Guide.
+ *For IAM policy examples that show how to control access to Amazon Keyspaces resources based on tags, + * see Amazon Keyspaces resource access based on tags + * in the Amazon Keyspaces Developer Guide.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { KeyspacesClient, TagResourceCommand } from "@aws-sdk/client-keyspaces"; // ES Modules import + * // const { KeyspacesClient, TagResourceCommand } = require("@aws-sdk/client-keyspaces"); // CommonJS import + * const client = new KeyspacesClient(config); + * const command = new TagResourceCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link TagResourceCommandInput} for command's `input` shape. + * @see {@link TagResourceCommandOutput} for command's `response` shape. + * @see {@link KeyspacesClientResolvedConfig | config} for KeyspacesClient's `config` shape. + * + */ +export class TagResourceCommand extends $Command< + TagResourceCommandInput, + TagResourceCommandOutput, + KeyspacesClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: TagResourceCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackRemoves the association of tags from a Amazon Keyspaces resource.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { KeyspacesClient, UntagResourceCommand } from "@aws-sdk/client-keyspaces"; // ES Modules import + * // const { KeyspacesClient, UntagResourceCommand } = require("@aws-sdk/client-keyspaces"); // CommonJS import + * const client = new KeyspacesClient(config); + * const command = new UntagResourceCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link UntagResourceCommandInput} for command's `input` shape. + * @see {@link UntagResourceCommandOutput} for command's `response` shape. + * @see {@link KeyspacesClientResolvedConfig | config} for KeyspacesClient's `config` shape. + * + */ +export class UntagResourceCommand extends $Command< + UntagResourceCommandInput, + UntagResourceCommandOutput, + KeyspacesClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: UntagResourceCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackAdds new columns to the table or updates one of the table's settings, for example + * capacity mode, encryption, point-in-time recovery, or ttl settings. + * Note that you can only update one specific table setting per update operation.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { KeyspacesClient, UpdateTableCommand } from "@aws-sdk/client-keyspaces"; // ES Modules import + * // const { KeyspacesClient, UpdateTableCommand } = require("@aws-sdk/client-keyspaces"); // CommonJS import + * const client = new KeyspacesClient(config); + * const command = new UpdateTableCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link UpdateTableCommandInput} for command's `input` shape. + * @see {@link UpdateTableCommandOutput} for command's `response` shape. + * @see {@link KeyspacesClientResolvedConfig | config} for KeyspacesClient's `config` shape. + * + */ +export class UpdateTableCommand extends $Command< + UpdateTableCommandInput, + UpdateTableCommandOutput, + KeyspacesClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: UpdateTableCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackYou do not have sufficient access to perform this action.
+ */ +export class AccessDeniedException extends __BaseException { + readonly name: "AccessDeniedException" = "AccessDeniedException"; + readonly $fault: "client" = "client"; + /** + * @internal + */ + constructor(opts: __ExceptionOptionTypeAmazon Keyspaces has two read/write capacity modes for processing reads and writes on your tables:
+ *+ * On-demand (default) + *
+ *+ * Provisioned + *
+ *The read/write capacity mode that you choose controls how you are charged for read and + * write throughput and how table throughput capacity is managed.
+ *For more information, see Read/write capacity modes in the Amazon Keyspaces Developer + * Guide.
+ */ +export interface CapacitySpecification { + /** + *The read/write throughput capacity mode for a table. The options are:
+ *
+ * throughputMode:PAY_PER_REQUEST
and
+ * throughputMode:PROVISIONED
. The provisioned capacity mode requires
+ * readCapacityUnits
and writeCapacityUnits
as inputs.
The default is throughput_mode:PAY_PER_REQUEST
.
For more information, see Read/write capacity modes in the Amazon Keyspaces Developer + * Guide.
+ */ + throughputMode: ThroughputMode | string | undefined; + + /** + *The throughput capacity specified for read
operations defined in read capacity units
+ * (RCUs)
.
The throughput capacity specified for write
operations defined in write capacity units
+ * (WCUs)
.
The read/write throughput capacity mode for a table. The options are:
+ *
+ * throughputMode:PAY_PER_REQUEST
and
+ * throughputMode:PROVISIONED
.
For more information, see Read/write capacity modes in the Amazon Keyspaces Developer + * Guide.
+ */ +export interface CapacitySpecificationSummary { + /** + *The read/write throughput capacity mode for a table. The options are:
+ *
+ * throughputMode:PAY_PER_REQUEST
and
+ * throughputMode:PROVISIONED
. The provisioned capacity mode requires
+ * readCapacityUnits
and writeCapacityUnits
as inputs.
The default is throughput_mode:PAY_PER_REQUEST
.
For more information, see Read/write capacity modes in the Amazon Keyspaces Developer + * Guide.
+ */ + throughputMode: ThroughputMode | string | undefined; + + /** + *The throughput capacity specified for read
operations defined in read capacity units
+ * (RCUs)
.
The throughput capacity specified for write
operations defined in write capacity units
+ * (WCUs)
.
The timestamp of the last operation that changed the provisioned throughput capacity of a table.
+ */ + lastUpdateToPayPerRequestTimestamp?: Date; +} + +export namespace CapacitySpecificationSummary { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CapacitySpecificationSummary): any => ({ + ...obj, + }); +} + +export enum SortOrder { + ASC = "ASC", + DESC = "DESC", +} + +/** + *The optional clustering column portion of your primary key determines how the data is clustered and sorted within each partition.
+ */ +export interface ClusteringKey { + /** + *The name(s) of the clustering column(s).
+ */ + name: string | undefined; + + /** + *Sets the ascendant (ASC
) or descendant (DESC
) order modifier.
The names and data types of regular columns.
+ */ +export interface ColumnDefinition { + /** + *The name of the column.
+ */ + name: string | undefined; + + /** + *The data type of the column. For a list of available data types, see Data types in the Amazon Keyspaces Developer + * Guide.
+ */ + type: string | undefined; +} + +export namespace ColumnDefinition { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ColumnDefinition): any => ({ + ...obj, + }); +} + +/** + *An optional comment that describes the table.
+ */ +export interface Comment { + /** + *An optional description of the table.
+ */ + message: string | undefined; +} + +export namespace Comment { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Comment): any => ({ + ...obj, + }); +} + +/** + *Amazon Keyspaces could not complete the requested action. This error may occur if you try to + * perform an action and the same or a different action is already + * in progress, or if you try to create a resource that already exists.
+ */ +export class ConflictException extends __BaseException { + readonly name: "ConflictException" = "ConflictException"; + readonly $fault: "client" = "client"; + /** + * @internal + */ + constructor(opts: __ExceptionOptionTypeDescribes a tag. A tag is a key-value pair. You can add up to 50 tags to a single Amazon Keyspaces resource.
+ *Amazon Web Services-assigned tag names and values are automatically assigned the aws:
prefix, which the user cannot assign.
+ * Amazon Web Services-assigned tag names do not count towards the tag limit of 50. User-assigned tag names have the
+ * prefix user:
in the Cost Allocation Report. You cannot backdate the application of a tag.
For more information, see Adding tags and labels to Amazon Keyspaces resources in the Amazon Keyspaces Developer + * Guide.
+ */ +export interface Tag { + /** + *The key of the tag. Tag keys are case sensitive. Each Amazon Keyspaces resource can only have up to one tag with the same key. If you try to add an + * existing tag (same key), the existing tag value will be updated to the new value.
+ */ + key: string | undefined; + + /** + *The value of the tag. Tag values are case-sensitive and can be null.
+ */ + value: string | undefined; +} + +export namespace Tag { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Tag): any => ({ + ...obj, + }); +} + +export interface CreateKeyspaceRequest { + /** + *The name of the keyspace to be created.
+ */ + keyspaceName: string | undefined; + + /** + *A list of key-value pair tags to be attached to the keyspace.
+ *For more information, see Adding tags and labels to Amazon Keyspaces resources in the Amazon Keyspaces Developer + * Guide.
+ */ + tags?: Tag[]; +} + +export namespace CreateKeyspaceRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateKeyspaceRequest): any => ({ + ...obj, + }); +} + +export interface CreateKeyspaceResponse { + /** + *The unique identifier of the keyspace in the format of an Amazon Resource Name (ARN).
+ */ + resourceArn: string | undefined; +} + +export namespace CreateKeyspaceResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateKeyspaceResponse): any => ({ + ...obj, + }); +} + +/** + *Amazon Keyspaces was unable to fully process this request because of an internal server error.
+ */ +export class InternalServerException extends __BaseException { + readonly name: "InternalServerException" = "InternalServerException"; + readonly $fault: "server" = "server"; + /** + * @internal + */ + constructor(opts: __ExceptionOptionTypeThe operation exceeded the service quota for this resource. For more information on service quotas, see Quotas in the Amazon Keyspaces Developer + * Guide.
+ */ +export class ServiceQuotaExceededException extends __BaseException { + readonly name: "ServiceQuotaExceededException" = "ServiceQuotaExceededException"; + readonly $fault: "client" = "client"; + /** + * @internal + */ + constructor(opts: __ExceptionOptionTypeThe operation failed due to an invalid or malformed request.
+ */ +export class ValidationException extends __BaseException { + readonly name: "ValidationException" = "ValidationException"; + readonly $fault: "client" = "client"; + /** + * @internal + */ + constructor(opts: __ExceptionOptionType+ * Amazon Keyspaces encrypts and decrypts the table data at rest transparently and integrates with Key Management Service for storing and managing the encryption key. + * You can choose one of the following KMS keys (KMS keys):
+ *+ * Amazon Web Services owned key - This is the default encryption type. The key is owned by Amazon Keyspaces (no additional charge).
+ *Customer managed key - This key is stored in your account and is created, owned, and managed by you. You have full control over the customer + * managed key (KMS charges apply).
+ *For more information about encryption at rest in Amazon Keyspaces, see Encryption at rest in the Amazon Keyspaces Developer + * Guide.
+ *For more information about KMS, see KMS management service concepts in the Key Management Service Developer Guide.
+ */ +export interface EncryptionSpecification { + /** + *+ * The encryption option specified for the table. You can choose one of the following KMS keys (KMS keys):
+ *
+ * type:AWS_OWNED_KMS_KEY
- This key is owned by Amazon Keyspaces.
+ * type:CUSTOMER_MANAGED_KMS_KEY
- This key is stored in your account and is created, owned, and managed by you.
+ * This option
+ * requires the kms_key_identifier
of the KMS key in Amazon Resource Name (ARN) format as input.
The default is type:AWS_OWNED_KMS_KEY
.
For more information, see Encryption at rest in the Amazon Keyspaces Developer + * Guide.
+ */ + type: EncryptionType | string | undefined; + + /** + *The Amazon Resource Name (ARN) of the customer managed KMS key, for example kms_key_identifier:ARN
.
+ *
Point-in-time recovery (PITR) helps protect your Amazon Keyspaces tables from accidental write or delete operations by providing you continuous backups of your table data.
+ *For more information, see Point-in-time recovery in the Amazon Keyspaces Developer + * Guide.
+ */ +export interface PointInTimeRecovery { + /** + *The options are:
+ *
+ * ENABLED
+ *
+ * DISABLED
+ *
The partition key portion of the primary key is required + * and determines how Amazon Keyspaces stores the data. + * The partition key can be a single column, or it can be a compound value composed of two or more columns.
+ */ +export interface PartitionKey { + /** + *The name(s) of the partition key column(s).
+ */ + name: string | undefined; +} + +export namespace PartitionKey { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PartitionKey): any => ({ + ...obj, + }); +} + +/** + *The static columns of the table. Static columns store values that are shared by all rows in the same partition.
+ */ +export interface StaticColumn { + /** + *The name of the static column.
+ */ + name: string | undefined; +} + +export namespace StaticColumn { + /** + * @internal + */ + export const filterSensitiveLog = (obj: StaticColumn): any => ({ + ...obj, + }); +} + +/** + *Describes the schema of the table.
+ */ +export interface SchemaDefinition { + /** + *The regular columns of the table.
+ */ + allColumns: ColumnDefinition[] | undefined; + + /** + *The columns that are part of the partition key of the table .
+ */ + partitionKeys: PartitionKey[] | undefined; + + /** + *The columns that are part of the clustering key of the table.
+ */ + clusteringKeys?: ClusteringKey[]; + + /** + *The columns that have been defined as STATIC
. Static columns store values that are shared by all rows in the same partition.
Enable custom Time to Live (TTL) settings for rows and columns without setting a TTL default for the specified table.
+ *For more information, see Enabling TTL on tables in the Amazon Keyspaces Developer + * Guide.
+ */ +export interface TimeToLive { + /** + *Shows how to enable custom Time to Live (TTL) settings for the specified table.
+ */ + status: TimeToLiveStatus | string | undefined; +} + +export namespace TimeToLive { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TimeToLive): any => ({ + ...obj, + }); +} + +export interface CreateTableRequest { + /** + *The name of the keyspace that the table is going to be created in.
+ */ + keyspaceName: string | undefined; + + /** + *The name of the table.
+ */ + tableName: string | undefined; + + /** + *The schemaDefinition
consists of the
+ * following parameters.
For each column to be created:
+ *
+ *
+ * name
+ * - The name
+ * of the column.
+ *
+ * type
+ *
- An Amazon Keyspaces
+ * data type. For more information, see Data types in the Amazon Keyspaces Developer
+ * Guide.
The primary key of the table consists of the + * following columns:
+ *
+ * partitionKeys
- The partition key can be a single column, or it can be a
+ * compound value composed of two or more columns. The partition
+ * key portion of the primary key is required and determines how
+ * Amazon Keyspaces stores your data.
+ *
+ * name
+ * - The name of each partition key column.
+ * clusteringKeys
- The optional clustering column portion of your primary key
+ * determines how the data is clustered and sorted within each
+ * partition.
+ *
+ * name
+ * - The name of the clustering column.
+ *
+ * orderBy
+ * - Sets the
+ * ascendant (ASC
) or descendant (DESC
) order modifier.
To define a column as static use
+ * staticColumns
+ *
-
+ * Static columns store values that are shared by all rows in the same partition:
+ *
+ * name
+ * - The name
+ * of the column.
+ *
+ * type
+ *
- An Amazon Keyspaces
+ * data type.
This parameter allows to enter a description of the table.
+ */ + comment?: Comment; + + /** + *Specifies the read/write throughput capacity mode for the table. The options are:
+ *
+ * throughputMode:PAY_PER_REQUEST
and
+ * throughputMode:PROVISIONED
. The provisioned capacity mode requires
+ * readCapacityUnits
and writeCapacityUnits
as inputs.
The default is
+ * throughput_mode:PAY_PER_REQUEST
.
For more information, see Read/write capacity modes in the Amazon Keyspaces Developer + * Guide.
+ */ + capacitySpecification?: CapacitySpecification; + + /** + *Specifies how the encryption key for encryption at rest is managed for the table. You can choose one of the following KMS key (KMS key):
+ * + *
+ * type:AWS_OWNED_KMS_KEY
- This key is owned by Amazon Keyspaces.
+ * type:CUSTOMER_MANAGED_KMS_KEY
- This key is stored in your account and is created, owned, and managed by you.
+ * This option
+ * requires the kms_key_identifier
of the KMS key in Amazon Resource Name (ARN) format as input.
The default is type:AWS_OWNED_KMS_KEY
.
For more information, see Encryption at rest in the Amazon Keyspaces Developer + * Guide.
+ */ + encryptionSpecification?: EncryptionSpecification; + + /** + *
+ * Specifies if pointInTimeRecovery
is enabled or disabled for the
+ * table. The options are:
+ * ENABLED
+ *
+ * DISABLED
+ *
If it's not specified, the default is DISABLED
.
For more information, see Point-in-time recovery in the Amazon Keyspaces Developer + * Guide.
+ */ + pointInTimeRecovery?: PointInTimeRecovery; + + /** + *+ * Enables Time to Live custom settings for the + * table. The options are:
+ *
+ * status:enabled
+ *
+ * status:disabled
+ *
The default is status:disabled
. After
+ * ttl
is enabled, you can't disable it
+ * for the table.
For more information, see Expiring data by using Amazon Keyspaces Time to Live (TTL) in the Amazon Keyspaces Developer + * Guide.
+ */ + ttl?: TimeToLive; + + /** + *The default Time to Live setting in seconds for the + * table.
+ *For more information, see Setting the default TTL value for a table in the Amazon Keyspaces Developer + * Guide.
+ */ + defaultTimeToLive?: number; + + /** + *A list of key-value pair tags to be + * attached to the resource.
+ *For more information, see Adding tags and labels to Amazon Keyspaces resources in the Amazon Keyspaces Developer + * Guide.
+ */ + tags?: Tag[]; +} + +export namespace CreateTableRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateTableRequest): any => ({ + ...obj, + }); +} + +export interface CreateTableResponse { + /** + *The unique identifier of the table in the format of an Amazon Resource Name (ARN).
+ */ + resourceArn: string | undefined; +} + +export namespace CreateTableResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateTableResponse): any => ({ + ...obj, + }); +} + +/** + *The operation tried to access a keyspace or table that doesn't exist. The resource might not be specified correctly, or its status might not be ACTIVE
.
The unique identifier in the format of Amazon Resource Name (ARN), for the resource not found.
+ */ + resourceArn?: string; + + /** + * @internal + */ + constructor(opts: __ExceptionOptionTypeThe name of the keyspace to be deleted.
+ */ + keyspaceName: string | undefined; +} + +export namespace DeleteKeyspaceRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteKeyspaceRequest): any => ({ + ...obj, + }); +} + +export interface DeleteKeyspaceResponse {} + +export namespace DeleteKeyspaceResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteKeyspaceResponse): any => ({ + ...obj, + }); +} + +export interface DeleteTableRequest { + /** + *The name of the keyspace of the to be deleted table.
+ */ + keyspaceName: string | undefined; + + /** + *The name of the table to be deleted.
+ */ + tableName: string | undefined; +} + +export namespace DeleteTableRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteTableRequest): any => ({ + ...obj, + }); +} + +export interface DeleteTableResponse {} + +export namespace DeleteTableResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteTableResponse): any => ({ + ...obj, + }); +} + +export interface GetKeyspaceRequest { + /** + *The name of the keyspace.
+ */ + keyspaceName: string | undefined; +} + +export namespace GetKeyspaceRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetKeyspaceRequest): any => ({ + ...obj, + }); +} + +export interface GetKeyspaceResponse { + /** + *The name of the keyspace.
+ */ + keyspaceName: string | undefined; + + /** + *The ARN of the keyspace.
+ */ + resourceArn: string | undefined; +} + +export namespace GetKeyspaceResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetKeyspaceResponse): any => ({ + ...obj, + }); +} + +export interface GetTableRequest { + /** + *The name of the keyspace that the table is stored in.
+ */ + keyspaceName: string | undefined; + + /** + *The name of the table.
+ */ + tableName: string | undefined; +} + +export namespace GetTableRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetTableRequest): any => ({ + ...obj, + }); +} + +/** + *The point-in-time recovery status of the specified table.
+ */ +export interface PointInTimeRecoverySummary { + /** + *Shows if point-in-time recovery is enabled or disabled for the specified table.
+ */ + status: PointInTimeRecoveryStatus | string | undefined; + + /** + *Specifies the earliest possible restore point of the table in ISO 8601 format.
+ */ + earliestRestorableTimestamp?: Date; +} + +export namespace PointInTimeRecoverySummary { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PointInTimeRecoverySummary): any => ({ + ...obj, + }); +} + +export enum TableStatus { + ACTIVE = "ACTIVE", + CREATING = "CREATING", + DELETED = "DELETED", + DELETING = "DELETING", + INACCESSIBLE_ENCRYPTION_CREDENTIALS = "INACCESSIBLE_ENCRYPTION_CREDENTIALS", + RESTORING = "RESTORING", + UPDATING = "UPDATING", +} + +export interface GetTableResponse { + /** + *The name of the keyspace that the specified table is stored in.
+ */ + keyspaceName: string | undefined; + + /** + *The name of the specified table.
+ */ + tableName: string | undefined; + + /** + *The Amazon Resource Name (ARN) of the specified table.
+ */ + resourceArn: string | undefined; + + /** + *The creation timestamp of the specified table.
+ */ + creationTimestamp?: Date; + + /** + *The current status of the specified table.
+ */ + status?: TableStatus | string; + + /** + *The schema definition of the specified table.
+ */ + schemaDefinition?: SchemaDefinition; + + /** + *The read/write throughput capacity mode for a table. The options are:
+ *
+ * throughputMode:PAY_PER_REQUEST
and
+ * throughputMode:PROVISIONED
.
The encryption settings of the specified table.
+ */ + encryptionSpecification?: EncryptionSpecification; + + /** + *The point-in-time recovery status of the specified table.
+ */ + pointInTimeRecovery?: PointInTimeRecoverySummary; + + /** + *The custom Time to Live settings of the specified table.
+ */ + ttl?: TimeToLive; + + /** + *The default Time to Live settings of the specified table.
+ */ + defaultTimeToLive?: number; + + /** + *The the description of the specified table.
+ */ + comment?: Comment; +} + +export namespace GetTableResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetTableResponse): any => ({ + ...obj, + }); +} + +export interface ListKeyspacesRequest { + /** + *The pagination token. To resume pagination, provide the NextToken
value as argument of a subsequent API invocation.
The total number of keyspaces to return in the output. If the total number of keyspaces available
+ * is more than the value specified, a NextToken
is provided in the output. To resume pagination,
+ * provide the NextToken
value as an argument of a subsequent API invocation.
Represents the properties of a keyspace.
+ */ +export interface KeyspaceSummary { + /** + *The name of the keyspace.
+ */ + keyspaceName: string | undefined; + + /** + *The unique identifier of the keyspace in the format of an Amazon Resource Name (ARN).
+ */ + resourceArn: string | undefined; +} + +export namespace KeyspaceSummary { + /** + * @internal + */ + export const filterSensitiveLog = (obj: KeyspaceSummary): any => ({ + ...obj, + }); +} + +export interface ListKeyspacesResponse { + /** + *A token to specify where to start paginating. This is the NextToken
from a previously truncated response.
A list of keyspaces.
+ */ + keyspaces: KeyspaceSummary[] | undefined; +} + +export namespace ListKeyspacesResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListKeyspacesResponse): any => ({ + ...obj, + }); +} + +export interface ListTablesRequest { + /** + *The pagination token. To resume pagination, provide the NextToken
value as an argument of a subsequent API invocation.
The total number of tables to return in the output. If the total number of tables available
+ * is more than the value specified, a NextToken
is provided in the output. To resume pagination,
+ * provide the NextToken
value as an argument of a subsequent API invocation.
The name of the keyspace.
+ */ + keyspaceName: string | undefined; +} + +export namespace ListTablesRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListTablesRequest): any => ({ + ...obj, + }); +} + +/** + *Returns the name of the specified table, the keyspace it is stored in, and the unique identifier in the format of an Amazon Resource Name (ARN).
+ */ +export interface TableSummary { + /** + *The name of the keyspace that the table is stored in.
+ */ + keyspaceName: string | undefined; + + /** + *The name of the table.
+ */ + tableName: string | undefined; + + /** + *The unique identifier of the table in the format of an Amazon Resource Name (ARN).
+ */ + resourceArn: string | undefined; +} + +export namespace TableSummary { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TableSummary): any => ({ + ...obj, + }); +} + +export interface ListTablesResponse { + /** + *A token to specify where to start paginating. This is the NextToken
from a previously truncated response.
A list of tables.
+ */ + tables?: TableSummary[]; +} + +export namespace ListTablesResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListTablesResponse): any => ({ + ...obj, + }); +} + +export interface ListTagsForResourceRequest { + /** + *The Amazon Resource Name (ARN) of the Amazon Keyspaces resource.
+ */ + resourceArn: string | undefined; + + /** + *The pagination token. To resume pagination, provide the NextToken
value as argument of a subsequent API invocation.
The total number of tags to return in the output. If the total number of tags available
+ * is more than the value specified, a NextToken
is provided in the output. To resume pagination,
+ * provide the NextToken
value as an argument of a subsequent API invocation.
A token to specify where to start paginating. This is the NextToken
from a previously truncated response.
A list of tags.
+ */ + tags?: Tag[]; +} + +export namespace ListTagsForResourceResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListTagsForResourceResponse): any => ({ + ...obj, + }); +} + +export interface RestoreTableRequest { + /** + *The keyspace name of the source table.
+ */ + sourceKeyspaceName: string | undefined; + + /** + *The name of the source table.
+ */ + sourceTableName: string | undefined; + + /** + *The name of the target keyspace.
+ */ + targetKeyspaceName: string | undefined; + + /** + *The name of the target table.
+ */ + targetTableName: string | undefined; + + /** + *The restore timestamp in ISO 8601 format.
+ */ + restoreTimestamp?: Date; + + /** + *Specifies the read/write throughput capacity mode for the target table. The options are:
+ *
+ * throughputMode:PAY_PER_REQUEST
and
+ * throughputMode:PROVISIONED
. The provisioned capacity mode requires
+ * readCapacityUnits
and writeCapacityUnits
as inputs.
The default is throughput_mode:PAY_PER_REQUEST
.
For more information, see Read/write capacity modes in the Amazon Keyspaces Developer + * Guide.
+ */ + capacitySpecificationOverride?: CapacitySpecification; + + /** + *+ * Specifies the encryption settings for the target table. You can choose one of the following KMS key (KMS key):
+ * + *
+ * type:AWS_OWNED_KMS_KEY
- This key is owned by Amazon Keyspaces.
+ * type:CUSTOMER_MANAGED_KMS_KEY
- This key is stored in your account and is created, owned, and managed by you.
+ * This option
+ * requires the kms_key_identifier
of the KMS key in Amazon Resource Name (ARN) format as input.
The default is type:AWS_OWNED_KMS_KEY
.
For more information, see Encryption at rest in the Amazon Keyspaces Developer + * Guide.
+ */ + encryptionSpecificationOverride?: EncryptionSpecification; + + /** + *
+ * Specifies the pointInTimeRecovery
settings for the target
+ * table. The options are:
+ * ENABLED
+ *
+ * DISABLED
+ *
If it's not specified, the default is DISABLED
.
For more information, see Point-in-time recovery in the Amazon Keyspaces Developer + * Guide.
+ */ + pointInTimeRecoveryOverride?: PointInTimeRecovery; + + /** + *A list of key-value pair tags to be + * attached to the restored table.
+ *For more information, see Adding tags and labels to Amazon Keyspaces resources in the Amazon Keyspaces Developer + * Guide.
+ */ + tagsOverride?: Tag[]; +} + +export namespace RestoreTableRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: RestoreTableRequest): any => ({ + ...obj, + }); +} + +export interface RestoreTableResponse { + /** + *The Amazon Resource Name (ARN) of the restored table.
+ */ + restoredTableARN: string | undefined; +} + +export namespace RestoreTableResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: RestoreTableResponse): any => ({ + ...obj, + }); +} + +export interface TagResourceRequest { + /** + *The Amazon Resource Name (ARN) of the Amazon Keyspaces resource to which to add tags.
+ */ + resourceArn: string | undefined; + + /** + *The tags to be assigned to the Amazon Keyspaces resource.
+ */ + tags: Tag[] | undefined; +} + +export namespace TagResourceRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TagResourceRequest): any => ({ + ...obj, + }); +} + +export interface TagResourceResponse {} + +export namespace TagResourceResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TagResourceResponse): any => ({ + ...obj, + }); +} + +export interface UntagResourceRequest { + /** + *The Amazon Keyspaces resource that the tags will be removed from. This value is an Amazon Resource Name (ARN).
+ */ + resourceArn: string | undefined; + + /** + *A list of existing tags to be removed from the Amazon Keyspaces resource.
+ */ + tags: Tag[] | undefined; +} + +export namespace UntagResourceRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UntagResourceRequest): any => ({ + ...obj, + }); +} + +export interface UntagResourceResponse {} + +export namespace UntagResourceResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UntagResourceResponse): any => ({ + ...obj, + }); +} + +export interface UpdateTableRequest { + /** + *The name of the keyspace the specified table is stored in.
+ */ + keyspaceName: string | undefined; + + /** + *The name of the table.
+ */ + tableName: string | undefined; + + /** + *For each column to be added to the specified table:
+ *
+ *
+ * name
+ * - The name
+ * of the column.
+ *
+ * type
+ *
- An Amazon Keyspaces
+ * data type. For more information, see Data types in the Amazon Keyspaces Developer
+ * Guide.
Modifies the read/write throughput capacity mode for the table. The options are:
+ *
+ * throughputMode:PAY_PER_REQUEST
and
+ * throughputMode:PROVISIONED
. The provisioned capacity mode requires
+ * readCapacityUnits
and writeCapacityUnits
as inputs.
The default is throughput_mode:PAY_PER_REQUEST
.
For more information, see Read/write capacity modes in the Amazon Keyspaces Developer + * Guide.
+ */ + capacitySpecification?: CapacitySpecification; + + /** + *+ * Modifies the encryption settings of the table. You can choose one of the following KMS key (KMS key):
+ * + *
+ * type:AWS_OWNED_KMS_KEY
- This key is owned by Amazon Keyspaces.
+ * type:CUSTOMER_MANAGED_KMS_KEY
- This key is stored in your account and is created, owned, and managed by you.
+ * This option
+ * requires the kms_key_identifier
of the KMS key in Amazon Resource Name (ARN) format as input.
The default is AWS_OWNED_KMS_KEY
.
For more information, see Encryption at rest in the Amazon Keyspaces Developer + * Guide.
+ */ + encryptionSpecification?: EncryptionSpecification; + + /** + *
+ * Modifies the pointInTimeRecovery
settings of the table. The options are:
+ * ENABLED
+ *
+ * DISABLED
+ *
If it's not specified, the default is DISABLED
.
For more information, see Point-in-time recovery in the Amazon Keyspaces Developer + * Guide.
+ */ + pointInTimeRecovery?: PointInTimeRecovery; + + /** + *Modifies Time to Live custom settings for the table. The options are:
+ *
+ * status:enabled
+ *
+ * status:disabled
+ *
The default is status:disabled
. After
+ * ttl
is enabled, you can't disable it
+ * for the table.
For more information, see Expiring data by using Amazon Keyspaces Time to Live (TTL) in the Amazon Keyspaces Developer + * Guide.
+ */ + ttl?: TimeToLive; + + /** + *The default Time to Live setting in seconds for the table.
+ *For more information, see Setting the default TTL value for a table in the Amazon Keyspaces Developer + * Guide.
+ */ + defaultTimeToLive?: number; +} + +export namespace UpdateTableRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateTableRequest): any => ({ + ...obj, + }); +} + +export interface UpdateTableResponse { + /** + *The Amazon Resource Name (ARN) of the modified table.
+ */ + resourceArn: string | undefined; +} + +export namespace UpdateTableResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateTableResponse): any => ({ + ...obj, + }); +} diff --git a/clients/client-keyspaces/src/pagination/Interfaces.ts b/clients/client-keyspaces/src/pagination/Interfaces.ts new file mode 100644 index 000000000000..3f5c65fc815a --- /dev/null +++ b/clients/client-keyspaces/src/pagination/Interfaces.ts @@ -0,0 +1,8 @@ +import { PaginationConfiguration } from "@aws-sdk/types"; + +import { Keyspaces } from "../Keyspaces"; +import { KeyspacesClient } from "../KeyspacesClient"; + +export interface KeyspacesPaginationConfiguration extends PaginationConfiguration { + client: Keyspaces | KeyspacesClient; +} diff --git a/clients/client-keyspaces/src/pagination/ListKeyspacesPaginator.ts b/clients/client-keyspaces/src/pagination/ListKeyspacesPaginator.ts new file mode 100644 index 000000000000..3b93acdcad05 --- /dev/null +++ b/clients/client-keyspaces/src/pagination/ListKeyspacesPaginator.ts @@ -0,0 +1,59 @@ +import { Paginator } from "@aws-sdk/types"; + +import { + ListKeyspacesCommand, + ListKeyspacesCommandInput, + ListKeyspacesCommandOutput, +} from "../commands/ListKeyspacesCommand"; +import { Keyspaces } from "../Keyspaces"; +import { KeyspacesClient } from "../KeyspacesClient"; +import { KeyspacesPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: KeyspacesClient, + input: ListKeyspacesCommandInput, + ...args: any +): Promise