diff --git a/CHANGELOG.md b/CHANGELOG.md
index 4b50c681fb2..eace9aed17e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,12 @@
+Release v1.50.38 (2024-03-13)
+===
+
+### Service Client Updates
+* `service/ivs-realtime`: Updates service API and documentation
+* `service/kinesisanalyticsv2`: Updates service API and documentation
+* `service/s3`: Updates service examples
+  * This release makes SigV4A the default request-signing algorithm for S3 on Outposts when using AWS Common Runtime (CRT).
+
Release v1.50.37 (2024-03-12)
===
diff --git a/aws/version.go b/aws/version.go
index 301eff20ee8..b6d9a0d8e62 100644
--- a/aws/version.go
+++ b/aws/version.go
@@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
-const SDKVersion = "1.50.37"
+const SDKVersion = "1.50.38"
diff --git a/models/apis/ivs-realtime/2020-07-14/api-2.json b/models/apis/ivs-realtime/2020-07-14/api-2.json
index ce282939894..92c38d50924 100644
--- a/models/apis/ivs-realtime/2020-07-14/api-2.json
+++ b/models/apis/ivs-realtime/2020-07-14/api-2.json
@@ -945,9 +945,17 @@
"GridConfiguration":{
"type":"structure",
"members":{
- "featuredParticipantAttribute":{"shape":"AttributeKey"}
+ "featuredParticipantAttribute":{"shape":"AttributeKey"},
+ "gridGap":{"shape":"GridGap"},
+ "omitStoppedVideo":{"shape":"OmitStoppedVideo"},
+ "videoAspectRatio":{"shape":"VideoAspectRatio"},
+ "videoFillMode":{"shape":"VideoFillMode"}
}
},
+ "GridGap":{
+ "type":"integer",
+ "min":0
+ },
"Height":{
"type":"integer",
"box":true,
@@ -966,7 +974,8 @@
"LayoutConfiguration":{
"type":"structure",
"members":{
- "grid":{"shape":"GridConfiguration"}
+ "grid":{"shape":"GridConfiguration"},
+ "pip":{"shape":"PipConfiguration"}
}
},
"ListCompositionsRequest":{
@@ -1155,6 +1164,7 @@
"max":100,
"min":1
},
+ "OmitStoppedVideo":{"type":"boolean"},
"PaginationToken":{
"type":"string",
"max":1024,
@@ -1296,6 +1306,51 @@
},
"exception":true
},
+ "PipBehavior":{
+ "type":"string",
+ "enum":[
+ "STATIC",
+ "DYNAMIC"
+ ]
+ },
+ "PipConfiguration":{
+ "type":"structure",
+ "members":{
+ "featuredParticipantAttribute":{"shape":"AttributeKey"},
+ "gridGap":{"shape":"GridGap"},
+ "omitStoppedVideo":{"shape":"OmitStoppedVideo"},
+ "pipBehavior":{"shape":"PipBehavior"},
+ "pipHeight":{"shape":"PipHeight"},
+ "pipOffset":{"shape":"PipOffset"},
+ "pipParticipantAttribute":{"shape":"AttributeKey"},
+ "pipPosition":{"shape":"PipPosition"},
+ "pipWidth":{"shape":"PipWidth"},
+ "videoFillMode":{"shape":"VideoFillMode"}
+ }
+ },
+ "PipHeight":{
+ "type":"integer",
+ "box":true,
+ "min":1
+ },
+ "PipOffset":{
+ "type":"integer",
+ "min":0
+ },
+ "PipPosition":{
+ "type":"string",
+ "enum":[
+ "TOP_LEFT",
+ "TOP_RIGHT",
+ "BOTTOM_LEFT",
+ "BOTTOM_RIGHT"
+ ]
+ },
+ "PipWidth":{
+ "type":"integer",
+ "box":true,
+ "min":1
+ },
"Published":{"type":"boolean"},
"RecordingConfiguration":{
"type":"structure",
@@ -1611,6 +1666,23 @@
"width":{"shape":"Width"}
}
},
+ "VideoAspectRatio":{
+ "type":"string",
+ "enum":[
+ "AUTO",
+ "VIDEO",
+ "SQUARE",
+ "PORTRAIT"
+ ]
+ },
+ "VideoFillMode":{
+ "type":"string",
+ "enum":[
+ "FILL",
+ "COVER",
+ "CONTAIN"
+ ]
+ },
"Width":{
"type":"integer",
"box":true,
diff --git a/models/apis/ivs-realtime/2020-07-14/docs-2.json b/models/apis/ivs-realtime/2020-07-14/docs-2.json
index 7fea3339106..cb2a412c8a8 100644
--- a/models/apis/ivs-realtime/2020-07-14/docs-2.json
+++ b/models/apis/ivs-realtime/2020-07-14/docs-2.json
@@ -39,7 +39,9 @@
"AttributeKey": {
"base": null,
"refs": {
- "GridConfiguration$featuredParticipantAttribute": "
This attribute name identifies the featured slot. A participant with this attribute set to \"true\"
(as a string value) in ParticipantTokenConfiguration is placed in the featured slot.
"
+ "GridConfiguration$featuredParticipantAttribute": "This attribute name identifies the featured slot. A participant with this attribute set to \"true\"
(as a string value) in ParticipantTokenConfiguration is placed in the featured slot.
",
+ "PipConfiguration$featuredParticipantAttribute": "This attribute name identifies the featured slot. A participant with this attribute set to \"true\"
(as a string value) in ParticipantTokenConfiguration is placed in the featured slot.
",
+ "PipConfiguration$pipParticipantAttribute": "Identifies the PiP slot. A participant with this attribute set to \"true\"
(as a string value) in ParticipantTokenConfiguration is placed in the PiP slot.
"
}
},
"Bitrate": {
@@ -389,6 +391,13 @@
"LayoutConfiguration$grid": "Configuration related to grid layout. Default: Grid layout.
"
}
},
+ "GridGap": {
+ "base": null,
+ "refs": {
+ "GridConfiguration$gridGap": "Specifies the spacing between participant tiles in pixels. Default: 2
.
",
+ "PipConfiguration$gridGap": "Specifies the spacing between participant tiles in pixels. Default: 0
.
"
+ }
+ },
"Height": {
"base": null,
"refs": {
@@ -529,6 +538,13 @@
"ListStorageConfigurationsRequest$maxResults": "Maximum number of storage configurations to return. Default: your service quota or 100, whichever is smaller.
"
}
},
+ "OmitStoppedVideo": {
+ "base": null,
+ "refs": {
+ "GridConfiguration$omitStoppedVideo": "Determines whether to omit participants with stopped video in the composition. Default: false
.
",
+ "PipConfiguration$omitStoppedVideo": "Determines whether to omit participants with stopped video in the composition. Default: false
.
"
+ }
+ },
"PaginationToken": {
"base": null,
"refs": {
@@ -689,6 +705,42 @@
"refs": {
}
},
+ "PipBehavior": {
+ "base": null,
+ "refs": {
+ "PipConfiguration$pipBehavior": "Defines PiP behavior when all participants have left. Default: STATIC
.
"
+ }
+ },
+ "PipConfiguration": {
+ "base": "Configuration information specific to Picture-in-Picture (PiP) layout, for server-side composition.
",
+ "refs": {
+ "LayoutConfiguration$pip": "Configuration related to PiP layout.
"
+ }
+ },
+ "PipHeight": {
+ "base": null,
+ "refs": {
+ "PipConfiguration$pipHeight": "Specifies the height of the PiP window in pixels. When this is not set explicitly, pipHeight
’s value will be based on the size of the composition and the aspect ratio of the participant’s video.
"
+ }
+ },
+ "PipOffset": {
+ "base": null,
+ "refs": {
+ "PipConfiguration$pipOffset": "Sets the PiP window’s offset position in pixels from the closest edges determined by PipPosition
. Default: 0
.
"
+ }
+ },
+ "PipPosition": {
+ "base": null,
+ "refs": {
+ "PipConfiguration$pipPosition": "Determines the corner position of the PiP window. Default: BOTTOM_RIGHT
.
"
+ }
+ },
+ "PipWidth": {
+ "base": null,
+ "refs": {
+ "PipConfiguration$pipWidth": "Specifies the width of the PiP window in pixels. When this is not set explicitly, pipWidth
’s value will be based on the size of the composition and the aspect ratio of the participant’s video.
"
+ }
+ },
"Published": {
"base": null,
"refs": {
@@ -1011,6 +1063,19 @@
"EncoderConfiguration$video": "Video configuration. Default: video resolution 1280x720, bitrate 2500 kbps, 30 fps
"
}
},
+ "VideoAspectRatio": {
+ "base": null,
+ "refs": {
+ "GridConfiguration$videoAspectRatio": "Sets the non-featured participant display mode. Default: VIDEO
.
"
+ }
+ },
+ "VideoFillMode": {
+ "base": null,
+ "refs": {
+ "GridConfiguration$videoFillMode": "Defines how video fits within the participant tile. When not set, videoFillMode
defaults to COVER
fill mode for participants in the grid and to CONTAIN
fill mode for featured participants.
",
+ "PipConfiguration$videoFillMode": "Defines how video fits within the participant tile. Default: COVER
.
"
+ }
+ },
"Width": {
"base": null,
"refs": {
diff --git a/models/apis/kinesisanalyticsv2/2018-05-23/api-2.json b/models/apis/kinesisanalyticsv2/2018-05-23/api-2.json
index 9f3631d5065..08191b00ad0 100644
--- a/models/apis/kinesisanalyticsv2/2018-05-23/api-2.json
+++ b/models/apis/kinesisanalyticsv2/2018-05-23/api-2.json
@@ -2418,7 +2418,8 @@
"SnapshotName":{"shape":"SnapshotName"},
"SnapshotStatus":{"shape":"SnapshotStatus"},
"ApplicationVersionId":{"shape":"ApplicationVersionId"},
- "SnapshotCreationTimestamp":{"shape":"Timestamp"}
+ "SnapshotCreationTimestamp":{"shape":"Timestamp"},
+ "RuntimeEnvironment":{"shape":"RuntimeEnvironment"}
}
},
"SnapshotName":{
@@ -2643,7 +2644,8 @@
"ServiceExecutionRoleUpdate":{"shape":"RoleARN"},
"RunConfigurationUpdate":{"shape":"RunConfigurationUpdate"},
"CloudWatchLoggingOptionUpdates":{"shape":"CloudWatchLoggingOptionUpdates"},
- "ConditionalToken":{"shape":"ConditionalToken"}
+ "ConditionalToken":{"shape":"ConditionalToken"},
+ "RuntimeEnvironmentUpdate":{"shape":"RuntimeEnvironment"}
}
},
"UpdateApplicationResponse":{
diff --git a/models/apis/kinesisanalyticsv2/2018-05-23/docs-2.json b/models/apis/kinesisanalyticsv2/2018-05-23/docs-2.json
index 7a740d2337b..f33f166c6dc 100644
--- a/models/apis/kinesisanalyticsv2/2018-05-23/docs-2.json
+++ b/models/apis/kinesisanalyticsv2/2018-05-23/docs-2.json
@@ -1,38 +1,38 @@
{
"version": "2.0",
- "service": "Amazon Kinesis Data Analytics is a fully managed service that you can use to process and analyze streaming data using Java, SQL, or Scala. The service enables you to quickly author and run Java, SQL, or Scala code against streaming sources to perform time series analytics, feed real-time dashboards, and create real-time metrics.
",
+ "service": " Amazon Managed Service for Apache Flink was previously known as Amazon Kinesis Data Analytics for Apache Flink.
Amazon Managed Service for Apache Flink is a fully managed service that you can use to process and analyze streaming data using Java, Python, SQL, or Scala. The service enables you to quickly author and run Java, SQL, or Scala code against streaming sources to perform time series analytics, feed real-time dashboards, and create real-time metrics.
",
"operations": {
"AddApplicationCloudWatchLoggingOption": "Adds an Amazon CloudWatch log stream to monitor application configuration errors.
",
"AddApplicationInput": " Adds a streaming source to your SQL-based Kinesis Data Analytics application.
You can add a streaming source when you create an application, or you can use this operation to add a streaming source after you create an application. For more information, see CreateApplication.
Any configuration update, including adding a streaming source using this operation, results in a new version of the application. You can use the DescribeApplication operation to find the current application version.
",
"AddApplicationInputProcessingConfiguration": "Adds an InputProcessingConfiguration to a SQL-based Kinesis Data Analytics application. An input processor pre-processes records on the input stream before the application's SQL code executes. Currently, the only input processor available is Amazon Lambda.
",
"AddApplicationOutput": "Adds an external destination to your SQL-based Kinesis Data Analytics application.
If you want Kinesis Data Analytics to deliver data from an in-application stream within your application to an external destination (such as an Kinesis data stream, a Kinesis Data Firehose delivery stream, or an Amazon Lambda function), you add the relevant configuration to your application using this operation. You can configure one or more outputs for your application. Each output configuration maps an in-application stream and an external destination.
You can use one of the output configurations to deliver data from your in-application error stream to an external destination so that you can analyze the errors.
Any configuration update, including adding a streaming source using this operation, results in a new version of the application. You can use the DescribeApplication operation to find the current application version.
",
"AddApplicationReferenceDataSource": "Adds a reference data source to an existing SQL-based Kinesis Data Analytics application.
Kinesis Data Analytics reads reference data (that is, an Amazon S3 object) and creates an in-application table within your application. In the request, you provide the source (S3 bucket name and object key name), name of the in-application table to create, and the necessary mapping information that describes how data in an Amazon S3 object maps to columns in the resulting in-application table.
",
- "AddApplicationVpcConfiguration": "Adds a Virtual Private Cloud (VPC) configuration to the application. Applications can use VPCs to store and access resources securely.
Note the following about VPC configurations for Kinesis Data Analytics applications:
-
VPC configurations are not supported for SQL applications.
-
When a VPC is added to a Kinesis Data Analytics application, the application can no longer be accessed from the Internet directly. To enable Internet access to the application, add an Internet gateway to your VPC.
",
- "CreateApplication": "Creates a Kinesis Data Analytics application. For information about creating a Kinesis Data Analytics application, see Creating an Application.
",
+ "AddApplicationVpcConfiguration": "Adds a Virtual Private Cloud (VPC) configuration to the application. Applications can use VPCs to store and access resources securely.
Note the following about VPC configurations for Managed Service for Apache Flink applications:
-
VPC configurations are not supported for SQL applications.
-
When a VPC is added to a Managed Service for Apache Flink application, the application can no longer be accessed from the Internet directly. To enable Internet access to the application, add an Internet gateway to your VPC.
",
+ "CreateApplication": "Creates a Managed Service for Apache Flink application. For information about creating a Managed Service for Apache Flink application, see Creating an Application.
",
"CreateApplicationPresignedUrl": "Creates and returns a URL that you can use to connect to an application's extension.
The IAM role or user used to call this API defines the permissions to access the extension. After the presigned URL is created, no additional permission is required to access this URL. IAM authorization policies for this API are also enforced for every HTTP request that attempts to connect to the extension.
You control the amount of time that the URL will be valid using the SessionExpirationDurationInSeconds
parameter. If you do not provide this parameter, the returned URL is valid for twelve hours.
The URL that you get from a call to CreateApplicationPresignedUrl must be used within 3 minutes to be valid. If you first try to use the URL after the 3-minute limit expires, the service returns an HTTP 403 Forbidden error.
",
"CreateApplicationSnapshot": "Creates a snapshot of the application's state data.
",
- "DeleteApplication": "Deletes the specified application. Kinesis Data Analytics halts application execution and deletes the application.
",
- "DeleteApplicationCloudWatchLoggingOption": "Deletes an Amazon CloudWatch log stream from an Kinesis Data Analytics application.
",
+ "DeleteApplication": "Deletes the specified application. Managed Service for Apache Flink halts application execution and deletes the application.
",
+ "DeleteApplicationCloudWatchLoggingOption": "Deletes an Amazon CloudWatch log stream from an SQL-based Kinesis Data Analytics application.
",
"DeleteApplicationInputProcessingConfiguration": "Deletes an InputProcessingConfiguration from an input.
",
"DeleteApplicationOutput": "Deletes the output destination configuration from your SQL-based Kinesis Data Analytics application's configuration. Kinesis Data Analytics will no longer write data from the corresponding in-application stream to the external output destination.
",
"DeleteApplicationReferenceDataSource": "Deletes a reference data source configuration from the specified SQL-based Kinesis Data Analytics application's configuration.
If the application is running, Kinesis Data Analytics immediately removes the in-application table that you created using the AddApplicationReferenceDataSource operation.
",
"DeleteApplicationSnapshot": "Deletes a snapshot of application state.
",
- "DeleteApplicationVpcConfiguration": "Removes a VPC configuration from a Kinesis Data Analytics application.
",
- "DescribeApplication": "Returns information about a specific Kinesis Data Analytics application.
If you want to retrieve a list of all applications in your account, use the ListApplications operation.
",
+ "DeleteApplicationVpcConfiguration": "Removes a VPC configuration from a Managed Service for Apache Flink application.
",
+ "DescribeApplication": "Returns information about a specific Managed Service for Apache Flink application.
If you want to retrieve a list of all applications in your account, use the ListApplications operation.
",
"DescribeApplicationSnapshot": "Returns information about a snapshot of application state data.
",
- "DescribeApplicationVersion": "Provides a detailed description of a specified version of the application. To see a list of all the versions of an application, invoke the ListApplicationVersions operation.
This operation is supported only for Amazon Kinesis Data Analytics for Apache Flink.
",
+ "DescribeApplicationVersion": "Provides a detailed description of a specified version of the application. To see a list of all the versions of an application, invoke the ListApplicationVersions operation.
This operation is supported only for Managed Service for Apache Flink.
",
"DiscoverInputSchema": "Infers a schema for a SQL-based Kinesis Data Analytics application by evaluating sample records on the specified streaming source (Kinesis data stream or Kinesis Data Firehose delivery stream) or Amazon S3 object. In the response, the operation returns the inferred schema and also the sample records that the operation used to infer the schema.
You can use the inferred schema when configuring a streaming source for your application. When you create an application using the Kinesis Data Analytics console, the console uses this operation to infer a schema and show it in the console user interface.
",
"ListApplicationSnapshots": "Lists information about the current application snapshots.
",
- "ListApplicationVersions": "Lists all the versions for the specified application, including versions that were rolled back. The response also includes a summary of the configuration associated with each version.
To get the complete description of a specific application version, invoke the DescribeApplicationVersion operation.
This operation is supported only for Amazon Kinesis Data Analytics for Apache Flink.
",
- "ListApplications": "Returns a list of Kinesis Data Analytics applications in your account. For each application, the response includes the application name, Amazon Resource Name (ARN), and status.
If you want detailed information about a specific application, use DescribeApplication.
",
+ "ListApplicationVersions": "Lists all the versions for the specified application, including versions that were rolled back. The response also includes a summary of the configuration associated with each version.
To get the complete description of a specific application version, invoke the DescribeApplicationVersion operation.
This operation is supported only for Managed Service for Apache Flink.
",
+ "ListApplications": "Returns a list of Managed Service for Apache Flink applications in your account. For each application, the response includes the application name, Amazon Resource Name (ARN), and status.
If you want detailed information about a specific application, use DescribeApplication.
",
"ListTagsForResource": "Retrieves the list of key-value tags assigned to the application. For more information, see Using Tagging.
",
- "RollbackApplication": "Reverts the application to the previous running version. You can roll back an application if you suspect it is stuck in a transient status.
You can roll back an application only if it is in the UPDATING
or AUTOSCALING
status.
When you rollback an application, it loads state data from the last successful snapshot. If the application has no snapshots, Kinesis Data Analytics rejects the rollback request.
This action is not supported for Kinesis Data Analytics for SQL applications.
",
- "StartApplication": "Starts the specified Kinesis Data Analytics application. After creating an application, you must exclusively call this operation to start your application.
",
- "StopApplication": "Stops the application from processing data. You can stop an application only if it is in the running status, unless you set the Force
parameter to true
.
You can use the DescribeApplication operation to find the application status.
Kinesis Data Analytics takes a snapshot when the application is stopped, unless Force
is set to true
.
",
- "TagResource": "Adds one or more key-value tags to a Kinesis Data Analytics application. Note that the maximum number of application tags includes system tags. The maximum number of user-defined application tags is 50. For more information, see Using Tagging.
",
- "UntagResource": "Removes one or more tags from a Kinesis Data Analytics application. For more information, see Using Tagging.
",
- "UpdateApplication": "Updates an existing Kinesis Data Analytics application. Using this operation, you can update application code, input configuration, and output configuration.
Kinesis Data Analytics updates the ApplicationVersionId
each time you update your application.
You cannot update the RuntimeEnvironment
of an existing application. If you need to update an application's RuntimeEnvironment
, you must delete the application and create it again.
",
- "UpdateApplicationMaintenanceConfiguration": "Updates the maintenance configuration of the Kinesis Data Analytics application.
You can invoke this operation on an application that is in one of the two following states: READY
or RUNNING
. If you invoke it when the application is in a state other than these two states, it throws a ResourceInUseException
. The service makes use of the updated configuration the next time it schedules maintenance for the application. If you invoke this operation after the service schedules maintenance, the service will apply the configuration update the next time it schedules maintenance for the application. This means that you might not see the maintenance configuration update applied to the maintenance process that follows a successful invocation of this operation, but to the following maintenance process instead.
To see the current maintenance configuration of your application, invoke the DescribeApplication operation.
For information about application maintenance, see Kinesis Data Analytics for Apache Flink Maintenance.
This operation is supported only for Amazon Kinesis Data Analytics for Apache Flink.
"
+ "RollbackApplication": "Reverts the application to the previous running version. You can roll back an application if you suspect it is stuck in a transient status.
You can roll back an application only if it is in the UPDATING
or AUTOSCALING
status.
When you rollback an application, it loads state data from the last successful snapshot. If the application has no snapshots, Managed Service for Apache Flink rejects the rollback request.
This action is not supported for Kinesis Data Analytics for SQL applications.
",
+ "StartApplication": "Starts the specified Managed Service for Apache Flink application. After creating an application, you must exclusively call this operation to start your application.
",
+ "StopApplication": "Stops the application from processing data. You can stop an application only if it is in the running status, unless you set the Force
parameter to true
.
You can use the DescribeApplication operation to find the application status.
Managed Service for Apache Flink takes a snapshot when the application is stopped, unless Force
is set to true
.
",
+ "TagResource": "Adds one or more key-value tags to a Managed Service for Apache Flink application. Note that the maximum number of application tags includes system tags. The maximum number of user-defined application tags is 50. For more information, see Using Tagging.
",
+ "UntagResource": "Removes one or more tags from a Managed Service for Apache Flink application. For more information, see Using Tagging.
",
+ "UpdateApplication": "Updates an existing Managed Service for Apache Flink application. Using this operation, you can update application code, input configuration, and output configuration.
Managed Service for Apache Flink updates the ApplicationVersionId
each time you update your application.
",
+ "UpdateApplicationMaintenanceConfiguration": "Updates the maintenance configuration of the Managed Service for Apache Flink application.
You can invoke this operation on an application that is in one of the two following states: READY
or RUNNING
. If you invoke it when the application is in a state other than these two states, it throws a ResourceInUseException
. The service makes use of the updated configuration the next time it schedules maintenance for the application. If you invoke this operation after the service schedules maintenance, the service will apply the configuration update the next time it schedules maintenance for the application. This means that you might not see the maintenance configuration update applied to the maintenance process that follows a successful invocation of this operation, but to the following maintenance process instead.
To see the current maintenance configuration of your application, invoke the DescribeApplication operation.
For information about application maintenance, see Managed Service for Apache Flink Maintenance.
This operation is supported only for Managed Service for Apache Flink.
"
},
"shapes": {
"AddApplicationCloudWatchLoggingOptionRequest": {
@@ -98,31 +98,31 @@
"ApplicationCodeConfiguration": {
"base": "Describes code configuration for an application.
",
"refs": {
- "ApplicationConfiguration$ApplicationCodeConfiguration": "The code location and type parameters for a Flink-based Kinesis Data Analytics application.
"
+ "ApplicationConfiguration$ApplicationCodeConfiguration": "The code location and type parameters for a Managed Service for Apache Flink application.
"
}
},
"ApplicationCodeConfigurationDescription": {
"base": "Describes code configuration for an application.
",
"refs": {
- "ApplicationConfigurationDescription$ApplicationCodeConfigurationDescription": "The details about the application code for a Flink-based Kinesis Data Analytics application.
"
+ "ApplicationConfigurationDescription$ApplicationCodeConfigurationDescription": "The details about the application code for a Managed Service for Apache Flink application.
"
}
},
"ApplicationCodeConfigurationUpdate": {
- "base": "Describes code configuration updates for an application. This is supported for a Flink-based Kinesis Data Analytics application or a SQL-based Kinesis Data Analytics application.
",
+ "base": "Describes code configuration updates for an application. This is supported for a Managed Service for Apache Flink application or a SQL-based Kinesis Data Analytics application.
",
"refs": {
"ApplicationConfigurationUpdate$ApplicationCodeConfigurationUpdate": "Describes updates to an application's code configuration.
"
}
},
"ApplicationConfiguration": {
- "base": "Specifies the creation parameters for a Kinesis Data Analytics application.
",
+ "base": "Specifies the creation parameters for a Managed Service for Apache Flink application.
",
"refs": {
"CreateApplicationRequest$ApplicationConfiguration": "Use this parameter to configure the application.
"
}
},
"ApplicationConfigurationDescription": {
- "base": "Describes details about the application code and starting parameters for a Kinesis Data Analytics application.
",
+ "base": "Describes details about the application code and starting parameters for a Managed Service for Apache Flink application.
",
"refs": {
- "ApplicationDetail$ApplicationConfigurationDescription": "Describes details about the application code and starting parameters for a Kinesis Data Analytics application.
"
+ "ApplicationDetail$ApplicationConfigurationDescription": "Describes details about the application code and starting parameters for a Managed Service for Apache Flink application.
"
}
},
"ApplicationConfigurationUpdate": {
@@ -141,7 +141,7 @@
"ApplicationDetail": {
"base": "Describes the application, including the application Amazon Resource Name (ARN), status, latest version, and input and output configurations.
",
"refs": {
- "CreateApplicationResponse$ApplicationDetail": "In response to your CreateApplication
request, Kinesis Data Analytics returns a response with details of the application it created.
",
+ "CreateApplicationResponse$ApplicationDetail": "In response to your CreateApplication
request, Managed Service for Apache Flink returns a response with details of the application it created.
",
"DescribeApplicationResponse$ApplicationDetail": "Provides a description of the application, such as the application's Amazon Resource Name (ARN), status, and latest version.
",
"DescribeApplicationVersionResponse$ApplicationVersionDetail": null,
"RollbackApplicationResponse$ApplicationDetail": null,
@@ -177,9 +177,9 @@
"ApplicationMode": {
"base": null,
"refs": {
- "ApplicationDetail$ApplicationMode": "To create a Kinesis Data Analytics Studio notebook, you must set the mode to INTERACTIVE
. However, for a Kinesis Data Analytics for Apache Flink application, the mode is optional.
",
- "ApplicationSummary$ApplicationMode": "For a Kinesis Data Analytics for Apache Flink application, the mode is STREAMING
. For a Kinesis Data Analytics Studio notebook, it is INTERACTIVE
.
",
- "CreateApplicationRequest$ApplicationMode": "Use the STREAMING
mode to create a Kinesis Data Analytics For Flink application. To create a Kinesis Data Analytics Studio notebook, use the INTERACTIVE
mode.
"
+ "ApplicationDetail$ApplicationMode": "To create a Managed Service for Apache Flink Studio notebook, you must set the mode to INTERACTIVE
. However, for a Managed Service for Apache Flink application, the mode is optional.
",
+ "ApplicationSummary$ApplicationMode": "For a Managed Service for Apache Flink application, the mode is STREAMING
. For a Managed Service for Apache Flink Studio notebook, it is INTERACTIVE
.
",
+ "CreateApplicationRequest$ApplicationMode": "Use the STREAMING
mode to create a Managed Service for Apache Flink application. To create a Managed Service for Apache Flink Studio notebook, use the INTERACTIVE
mode.
"
}
},
"ApplicationName": {
@@ -232,21 +232,21 @@
}
},
"ApplicationSnapshotConfiguration": {
- "base": "Describes whether snapshots are enabled for a Flink-based Kinesis Data Analytics application.
",
+ "base": "Describes whether snapshots are enabled for a Managed Service for Apache Flink application.
",
"refs": {
- "ApplicationConfiguration$ApplicationSnapshotConfiguration": "Describes whether snapshots are enabled for a Flink-based Kinesis Data Analytics application.
"
+ "ApplicationConfiguration$ApplicationSnapshotConfiguration": "Describes whether snapshots are enabled for a Managed Service for Apache Flink application.
"
}
},
"ApplicationSnapshotConfigurationDescription": {
- "base": "Describes whether snapshots are enabled for a Flink-based Kinesis Data Analytics application.
",
+ "base": "Describes whether snapshots are enabled for a Managed Service for Apache Flink application.
",
"refs": {
- "ApplicationConfigurationDescription$ApplicationSnapshotConfigurationDescription": "Describes whether snapshots are enabled for a Flink-based Kinesis Data Analytics application.
"
+ "ApplicationConfigurationDescription$ApplicationSnapshotConfigurationDescription": "Describes whether snapshots are enabled for a Managed Service for Apache Flink application.
"
}
},
"ApplicationSnapshotConfigurationUpdate": {
- "base": "Describes updates to whether snapshots are enabled for a Flink-based Kinesis Data Analytics application.
",
+ "base": "Describes updates to whether snapshots are enabled for a Managed Service for Apache Flink application.
",
"refs": {
- "ApplicationConfigurationUpdate$ApplicationSnapshotConfigurationUpdate": "Describes whether snapshots are enabled for a Flink-based Kinesis Data Analytics application.
"
+ "ApplicationConfigurationUpdate$ApplicationSnapshotConfigurationUpdate": "Describes whether snapshots are enabled for a Managed Service for Apache Flink application.
"
}
},
"ApplicationStatus": {
@@ -272,8 +272,8 @@
"ApplicationVersionId": {
"base": null,
"refs": {
- "AddApplicationCloudWatchLoggingOptionRequest$CurrentApplicationVersionId": "The version ID of the Kinesis Data Analytics application. You must provide the CurrentApplicationVersionId
or the ConditionalToken
.You can retrieve the application version ID using DescribeApplication. For better concurrency support, use the ConditionalToken
parameter instead of CurrentApplicationVersionId
.
",
- "AddApplicationCloudWatchLoggingOptionResponse$ApplicationVersionId": "The new version ID of the Kinesis Data Analytics application. Kinesis Data Analytics updates the ApplicationVersionId
each time you change the CloudWatch logging options.
",
+ "AddApplicationCloudWatchLoggingOptionRequest$CurrentApplicationVersionId": "The version ID of the SQL-based Kinesis Data Analytics application. You must provide the CurrentApplicationVersionId
or the ConditionalToken
.You can retrieve the application version ID using DescribeApplication. For better concurrency support, use the ConditionalToken
parameter instead of CurrentApplicationVersionId
.
",
+ "AddApplicationCloudWatchLoggingOptionResponse$ApplicationVersionId": "The new version ID of the SQL-based Kinesis Data Analytics application. Kinesis Data Analytics updates the ApplicationVersionId
each time you change the CloudWatch logging options.
",
"AddApplicationInputProcessingConfigurationRequest$CurrentApplicationVersionId": "The version of the application to which you want to add the input processing configuration. You can use the DescribeApplication operation to get the current application version. If the version specified is not the current version, the ConcurrentModificationException
is returned.
",
"AddApplicationInputProcessingConfigurationResponse$ApplicationVersionId": "Provides the current application version.
",
"AddApplicationInputRequest$CurrentApplicationVersionId": "The current version of your application. You must provide the ApplicationVersionID
or the ConditionalToken
.You can use the DescribeApplication operation to find the current application version.
",
@@ -283,13 +283,13 @@
"AddApplicationReferenceDataSourceRequest$CurrentApplicationVersionId": "The version of the application for which you are adding the reference data source. You can use the DescribeApplication operation to get the current application version. If the version specified is not the current version, the ConcurrentModificationException
is returned.
",
"AddApplicationReferenceDataSourceResponse$ApplicationVersionId": "The updated application version ID. Kinesis Data Analytics increments this ID when the application is updated.
",
"AddApplicationVpcConfigurationRequest$CurrentApplicationVersionId": "The version of the application to which you want to add the VPC configuration. You must provide the CurrentApplicationVersionId
or the ConditionalToken
. You can use the DescribeApplication operation to get the current application version. If the version specified is not the current version, the ConcurrentModificationException
is returned. For better concurrency support, use the ConditionalToken
parameter instead of CurrentApplicationVersionId
.
",
- "AddApplicationVpcConfigurationResponse$ApplicationVersionId": "Provides the current application version. Kinesis Data Analytics updates the ApplicationVersionId each time you update the application.
",
- "ApplicationDetail$ApplicationVersionId": "Provides the current application version. Kinesis Data Analytics updates the ApplicationVersionId
each time you update the application.
",
+ "AddApplicationVpcConfigurationResponse$ApplicationVersionId": "Provides the current application version. Managed Service for Apache Flink updates the ApplicationVersionId each time you update the application.
",
+ "ApplicationDetail$ApplicationVersionId": "Provides the current application version. Managed Service for Apache Flink updates the ApplicationVersionId
each time you update the application.
",
"ApplicationDetail$ApplicationVersionUpdatedFrom": "The previous application version before the latest application update. RollbackApplication reverts the application to this version.
",
"ApplicationDetail$ApplicationVersionRolledBackFrom": "If you reverted the application using RollbackApplication, the application version when RollbackApplication
was called.
",
"ApplicationDetail$ApplicationVersionRolledBackTo": "The version to which you want to roll back the application.
",
"ApplicationSummary$ApplicationVersionId": "Provides the current application version.
",
- "ApplicationVersionSummary$ApplicationVersionId": "The ID of the application version. Kinesis Data Analytics updates the ApplicationVersionId
each time you update the application.
",
+ "ApplicationVersionSummary$ApplicationVersionId": "The ID of the application version. Managed Service for Apache Flink updates the ApplicationVersionId
each time you update the application.
",
"DeleteApplicationCloudWatchLoggingOptionRequest$CurrentApplicationVersionId": "The version ID of the application. You must provide the CurrentApplicationVersionId
or the ConditionalToken
. You can retrieve the application version ID using DescribeApplication. For better concurrency support, use the ConditionalToken
parameter instead of CurrentApplicationVersionId
.
",
"DeleteApplicationCloudWatchLoggingOptionResponse$ApplicationVersionId": "The version ID of the application. Kinesis Data Analytics updates the ApplicationVersionId
each time you change the CloudWatch logging options.
",
"DeleteApplicationInputProcessingConfigurationRequest$CurrentApplicationVersionId": "The application version. You can use the DescribeApplication operation to get the current application version. If the version specified is not the current version, the ConcurrentModificationException
is returned.
",
@@ -342,18 +342,18 @@
"BooleanObject": {
"base": null,
"refs": {
- "ApplicationSnapshotConfiguration$SnapshotsEnabled": "Describes whether snapshots are enabled for a Flink-based Kinesis Data Analytics application.
",
- "ApplicationSnapshotConfigurationDescription$SnapshotsEnabled": "Describes whether snapshots are enabled for a Flink-based Kinesis Data Analytics application.
",
+ "ApplicationSnapshotConfiguration$SnapshotsEnabled": "Describes whether snapshots are enabled for a Managed Service for Apache Flink application.
",
+ "ApplicationSnapshotConfigurationDescription$SnapshotsEnabled": "Describes whether snapshots are enabled for a Managed Service for Apache Flink application.
",
"ApplicationSnapshotConfigurationUpdate$SnapshotsEnabledUpdate": "Describes updates to whether snapshots are enabled for an application.
",
- "CheckpointConfiguration$CheckpointingEnabled": "Describes whether checkpointing is enabled for a Flink-based Kinesis Data Analytics application.
If CheckpointConfiguration.ConfigurationType
is DEFAULT
, the application will use a CheckpointingEnabled
value of true
, even if this value is set to another value using this API or in application code.
",
- "CheckpointConfigurationDescription$CheckpointingEnabled": "Describes whether checkpointing is enabled for a Flink-based Kinesis Data Analytics application.
If CheckpointConfiguration.ConfigurationType
is DEFAULT
, the application will use a CheckpointingEnabled
value of true
, even if this value is set to another value using this API or in application code.
",
+ "CheckpointConfiguration$CheckpointingEnabled": "Describes whether checkpointing is enabled for a Managed Service for Apache Flink application.
If CheckpointConfiguration.ConfigurationType
is DEFAULT
, the application will use a CheckpointingEnabled
value of true
, even if this value is set to another value using this API or in application code.
",
+ "CheckpointConfigurationDescription$CheckpointingEnabled": "Describes whether checkpointing is enabled for a Managed Service for Apache Flink application.
If CheckpointConfiguration.ConfigurationType
is DEFAULT
, the application will use a CheckpointingEnabled
value of true
, even if this value is set to another value using this API or in application code.
",
"CheckpointConfigurationUpdate$CheckpointingEnabledUpdate": "Describes updates to whether checkpointing is enabled for an application.
If CheckpointConfiguration.ConfigurationType
is DEFAULT
, the application will use a CheckpointingEnabled
value of true
, even if this value is set to another value using this API or in application code.
",
- "DescribeApplicationRequest$IncludeAdditionalDetails": "Displays verbose information about a Kinesis Data Analytics application, including the application's job plan.
",
+ "DescribeApplicationRequest$IncludeAdditionalDetails": "Displays verbose information about a Managed Service for Apache Flink application, including the application's job plan.
",
"FlinkRunConfiguration$AllowNonRestoredState": "When restoring from a snapshot, specifies whether the runtime is allowed to skip a state that cannot be mapped to the new program. This will happen if the program is updated between snapshots to remove stateful parameters, and state data in the snapshot no longer corresponds to valid application data. For more information, see Allowing Non-Restored State in the Apache Flink documentation.
This value defaults to false
. If you update your application without specifying this parameter, AllowNonRestoredState
will be set to false
, even if it was previously set to true
.
",
- "ParallelismConfiguration$AutoScalingEnabled": "Describes whether the Kinesis Data Analytics service can increase the parallelism of the application in response to increased throughput.
",
- "ParallelismConfigurationDescription$AutoScalingEnabled": "Describes whether the Kinesis Data Analytics service can increase the parallelism of the application in response to increased throughput.
",
- "ParallelismConfigurationUpdate$AutoScalingEnabledUpdate": "Describes updates to whether the Kinesis Data Analytics service can increase the parallelism of a Flink-based Kinesis Data Analytics application in response to increased throughput.
",
- "StopApplicationRequest$Force": "Set to true
to force the application to stop. If you set Force
to true
, Kinesis Data Analytics stops the application without taking a snapshot.
Force-stopping your application may lead to data loss or duplication. To prevent data loss or duplicate processing of data during application restarts, we recommend you to take frequent snapshots of your application.
You can only force stop a Flink-based Kinesis Data Analytics application. You can't force stop a SQL-based Kinesis Data Analytics application.
The application must be in the STARTING
, UPDATING
, STOPPING
, AUTOSCALING
, or RUNNING
status.
"
+ "ParallelismConfiguration$AutoScalingEnabled": "Describes whether the Managed Service for Apache Flink service can increase the parallelism of the application in response to increased throughput.
",
+ "ParallelismConfigurationDescription$AutoScalingEnabled": "Describes whether the Managed Service for Apache Flink service can increase the parallelism of the application in response to increased throughput.
",
+ "ParallelismConfigurationUpdate$AutoScalingEnabledUpdate": "Describes updates to whether the Managed Service for Apache Flink service can increase the parallelism of a Managed Service for Apache Flink application in response to increased throughput.
",
+ "StopApplicationRequest$Force": "Set to true
to force the application to stop. If you set Force
to true
, Managed Service for Apache Flink stops the application without taking a snapshot.
Force-stopping your application may lead to data loss or duplication. To prevent data loss or duplicate processing of data during application restarts, we recommend you to take frequent snapshots of your application.
You can only force stop a Managed Service for Apache Flink application. You can't force stop a SQL-based Kinesis Data Analytics application.
The application must be in the STARTING
, UPDATING
, STOPPING
, AUTOSCALING
, or RUNNING
status.
"
}
},
"BucketARN": {
@@ -378,21 +378,21 @@
}
},
"CatalogConfiguration": {
- "base": "The configuration parameters for the default Amazon Glue database. You use this database for SQL queries that you write in a Kinesis Data Analytics Studio notebook.
",
+ "base": "The configuration parameters for the default Amazon Glue database. You use this database for SQL queries that you write in a Managed Service for Apache Flink Studio notebook.
",
"refs": {
- "ZeppelinApplicationConfiguration$CatalogConfiguration": "The Amazon Glue Data Catalog that you use in queries in a Kinesis Data Analytics Studio notebook.
"
+ "ZeppelinApplicationConfiguration$CatalogConfiguration": "The Amazon Glue Data Catalog that you use in queries in a Managed Service for Apache Flink Studio notebook.
"
}
},
"CatalogConfigurationDescription": {
- "base": "The configuration parameters for the default Amazon Glue database. You use this database for Apache Flink SQL queries and table API transforms that you write in a Kinesis Data Analytics Studio notebook.
",
+ "base": "The configuration parameters for the default Amazon Glue database. You use this database for Apache Flink SQL queries and table API transforms that you write in a Managed Service for Apache Flink Studio notebook.
",
"refs": {
- "ZeppelinApplicationConfigurationDescription$CatalogConfigurationDescription": "The Amazon Glue Data Catalog that is associated with the Kinesis Data Analytics Studio notebook.
"
+ "ZeppelinApplicationConfigurationDescription$CatalogConfigurationDescription": "The Amazon Glue Data Catalog that is associated with the Managed Service for Apache Flink Studio notebook.
"
}
},
"CatalogConfigurationUpdate": {
- "base": "Updates to the configuration parameters for the default Amazon Glue database. You use this database for SQL queries that you write in a Kinesis Data Analytics Studio notebook.
",
+ "base": "Updates to the configuration parameters for the default Amazon Glue database. You use this database for SQL queries that you write in a Managed Service for Apache Flink Studio notebook.
",
"refs": {
- "ZeppelinApplicationConfigurationUpdate$CatalogConfigurationUpdate": "Updates to the configuration of the Amazon Glue Data Catalog that is associated with the Kinesis Data Analytics Studio notebook.
"
+ "ZeppelinApplicationConfigurationUpdate$CatalogConfigurationUpdate": "Updates to the configuration of the Amazon Glue Data Catalog that is associated with the Managed Service for Apache Flink Studio notebook.
"
}
},
"CheckpointConfiguration": {
@@ -402,13 +402,13 @@
}
},
"CheckpointConfigurationDescription": {
- "base": "Describes checkpointing parameters for a Flink-based Kinesis Data Analytics application.
",
+ "base": "Describes checkpointing parameters for a Managed Service for Apache Flink application.
",
"refs": {
"FlinkApplicationConfigurationDescription$CheckpointConfigurationDescription": "Describes an application's checkpointing configuration. Checkpointing is the process of persisting application state for fault tolerance.
"
}
},
"CheckpointConfigurationUpdate": {
- "base": "Describes updates to the checkpointing parameters for a Flink-based Kinesis Data Analytics application.
",
+ "base": "Describes updates to the checkpointing parameters for a Managed Service for Apache Flink application.
",
"refs": {
"FlinkApplicationConfigurationUpdate$CheckpointConfigurationUpdate": "Describes updates to an application's checkpointing configuration. Checkpointing is the process of persisting application state for fault tolerance.
"
}
@@ -437,7 +437,7 @@
"CloudWatchLoggingOptionDescriptions": {
"base": null,
"refs": {
- "AddApplicationCloudWatchLoggingOptionResponse$CloudWatchLoggingOptionDescriptions": "The descriptions of the current CloudWatch logging options for the Kinesis Data Analytics application.
",
+ "AddApplicationCloudWatchLoggingOptionResponse$CloudWatchLoggingOptionDescriptions": "The descriptions of the current CloudWatch logging options for the SQL-based Kinesis Data Analytics application.
",
"ApplicationDetail$CloudWatchLoggingOptionDescriptions": "Describes the application Amazon CloudWatch logging options.
",
"DeleteApplicationCloudWatchLoggingOptionResponse$CloudWatchLoggingOptionDescriptions": "The descriptions of the remaining CloudWatch logging options for the application.
"
}
@@ -461,13 +461,13 @@
}
},
"CodeContent": {
- "base": "Specifies either the application code, or the location of the application code, for a Flink-based Kinesis Data Analytics application.
",
+ "base": "Specifies either the application code, or the location of the application code, for a Managed Service for Apache Flink application.
",
"refs": {
"ApplicationCodeConfiguration$CodeContent": "The location and type of the application code.
"
}
},
"CodeContentDescription": {
- "base": "Describes details about the code of a Kinesis Data Analytics application.
",
+ "base": "Describes details about the code of a Managed Service for Apache Flink application.
",
"refs": {
"ApplicationCodeConfigurationDescription$CodeContentDescription": "Describes details about the location and format of the application code.
"
}
@@ -522,15 +522,15 @@
"ConfigurationType": {
"base": null,
"refs": {
- "CheckpointConfiguration$ConfigurationType": "Describes whether the application uses Kinesis Data Analytics' default checkpointing behavior. You must set this property to CUSTOM
in order to set the CheckpointingEnabled
, CheckpointInterval
, or MinPauseBetweenCheckpoints
parameters.
If this value is set to DEFAULT
, the application will use the following values, even if they are set to other values using APIs or application code:
-
CheckpointingEnabled: true
-
CheckpointInterval: 60000
-
MinPauseBetweenCheckpoints: 5000
",
- "CheckpointConfigurationDescription$ConfigurationType": "Describes whether the application uses the default checkpointing behavior in Kinesis Data Analytics.
If this value is set to DEFAULT
, the application will use the following values, even if they are set to other values using APIs or application code:
-
CheckpointingEnabled: true
-
CheckpointInterval: 60000
-
MinPauseBetweenCheckpoints: 5000
",
- "CheckpointConfigurationUpdate$ConfigurationTypeUpdate": "Describes updates to whether the application uses the default checkpointing behavior of Kinesis Data Analytics. You must set this property to CUSTOM
in order to set the CheckpointingEnabled
, CheckpointInterval
, or MinPauseBetweenCheckpoints
parameters.
If this value is set to DEFAULT
, the application will use the following values, even if they are set to other values using APIs or application code:
-
CheckpointingEnabled: true
-
CheckpointInterval: 60000
-
MinPauseBetweenCheckpoints: 5000
",
+      "CheckpointConfiguration$ConfigurationType": "Describes whether the application uses Managed Service for Apache Flink's default checkpointing behavior. You must set this property to CUSTOM
in order to set the CheckpointingEnabled
, CheckpointInterval
, or MinPauseBetweenCheckpoints
parameters.
If this value is set to DEFAULT
, the application will use the following values, even if they are set to other values using APIs or application code:
-
CheckpointingEnabled: true
-
CheckpointInterval: 60000
-
MinPauseBetweenCheckpoints: 5000
",
+ "CheckpointConfigurationDescription$ConfigurationType": "Describes whether the application uses the default checkpointing behavior in Managed Service for Apache Flink.
If this value is set to DEFAULT
, the application will use the following values, even if they are set to other values using APIs or application code:
-
CheckpointingEnabled: true
-
CheckpointInterval: 60000
-
MinPauseBetweenCheckpoints: 5000
",
+ "CheckpointConfigurationUpdate$ConfigurationTypeUpdate": "Describes updates to whether the application uses the default checkpointing behavior of Managed Service for Apache Flink. You must set this property to CUSTOM
in order to set the CheckpointingEnabled
, CheckpointInterval
, or MinPauseBetweenCheckpoints
parameters.
If this value is set to DEFAULT
, the application will use the following values, even if they are set to other values using APIs or application code:
-
CheckpointingEnabled: true
-
CheckpointInterval: 60000
-
MinPauseBetweenCheckpoints: 5000
",
"MonitoringConfiguration$ConfigurationType": "Describes whether to use the default CloudWatch logging configuration for an application. You must set this property to CUSTOM
in order to set the LogLevel
or MetricsLevel
parameters.
",
"MonitoringConfigurationDescription$ConfigurationType": "Describes whether to use the default CloudWatch logging configuration for an application.
",
"MonitoringConfigurationUpdate$ConfigurationTypeUpdate": "Describes updates to whether to use the default CloudWatch logging configuration for an application. You must set this property to CUSTOM
in order to set the LogLevel
or MetricsLevel
parameters.
",
- "ParallelismConfiguration$ConfigurationType": "Describes whether the application uses the default parallelism for the Kinesis Data Analytics service. You must set this property to CUSTOM
in order to change your application's AutoScalingEnabled
, Parallelism
, or ParallelismPerKPU
properties.
",
- "ParallelismConfigurationDescription$ConfigurationType": "Describes whether the application uses the default parallelism for the Kinesis Data Analytics service.
",
- "ParallelismConfigurationUpdate$ConfigurationTypeUpdate": "Describes updates to whether the application uses the default parallelism for the Kinesis Data Analytics service, or if a custom parallelism is used. You must set this property to CUSTOM
in order to change your application's AutoScalingEnabled
, Parallelism
, or ParallelismPerKPU
properties.
"
+ "ParallelismConfiguration$ConfigurationType": "Describes whether the application uses the default parallelism for the Managed Service for Apache Flink service. You must set this property to CUSTOM
in order to change your application's AutoScalingEnabled
, Parallelism
, or ParallelismPerKPU
properties.
",
+ "ParallelismConfigurationDescription$ConfigurationType": "Describes whether the application uses the default parallelism for the Managed Service for Apache Flink service.
",
+ "ParallelismConfigurationUpdate$ConfigurationTypeUpdate": "Describes updates to whether the application uses the default parallelism for the Managed Service for Apache Flink service, or if a custom parallelism is used. You must set this property to CUSTOM
in order to change your application's AutoScalingEnabled
, Parallelism
, or ParallelismPerKPU
properties.
"
}
},
"CreateApplicationPresignedUrlRequest": {
@@ -667,15 +667,15 @@
}
},
"DeployAsApplicationConfiguration": {
- "base": "The information required to deploy a Kinesis Data Analytics Studio notebook as an application with durable state.
",
+ "base": "The information required to deploy a Managed Service for Apache Flink Studio notebook as an application with durable state.
",
"refs": {
- "ZeppelinApplicationConfiguration$DeployAsApplicationConfiguration": "The information required to deploy a Kinesis Data Analytics Studio notebook as an application with durable state.
"
+ "ZeppelinApplicationConfiguration$DeployAsApplicationConfiguration": "The information required to deploy a Managed Service for Apache Flink Studio notebook as an application with durable state.
"
}
},
"DeployAsApplicationConfigurationDescription": {
"base": "The configuration information required to deploy an Amazon Data Analytics Studio notebook as an application with durable state.
",
"refs": {
- "ZeppelinApplicationConfigurationDescription$DeployAsApplicationConfigurationDescription": "The parameters required to deploy a Kinesis Data Analytics Studio notebook as an application with durable state.
"
+ "ZeppelinApplicationConfigurationDescription$DeployAsApplicationConfigurationDescription": "The parameters required to deploy a Managed Service for Apache Flink Studio notebook as an application with durable state.
"
}
},
"DeployAsApplicationConfigurationUpdate": {
@@ -733,21 +733,21 @@
}
},
"EnvironmentProperties": {
- "base": "Describes execution properties for a Flink-based Kinesis Data Analytics application.
",
+ "base": "Describes execution properties for a Managed Service for Apache Flink application.
",
"refs": {
- "ApplicationConfiguration$EnvironmentProperties": "Describes execution properties for a Flink-based Kinesis Data Analytics application.
"
+ "ApplicationConfiguration$EnvironmentProperties": "Describes execution properties for a Managed Service for Apache Flink application.
"
}
},
"EnvironmentPropertyDescriptions": {
"base": "Describes the execution properties for an Apache Flink runtime.
",
"refs": {
- "ApplicationConfigurationDescription$EnvironmentPropertyDescriptions": "Describes execution properties for a Flink-based Kinesis Data Analytics application.
"
+ "ApplicationConfigurationDescription$EnvironmentPropertyDescriptions": "Describes execution properties for a Managed Service for Apache Flink application.
"
}
},
"EnvironmentPropertyUpdates": {
- "base": "Describes updates to the execution property groups for a Flink-based Kinesis Data Analytics application or a Studio notebook.
",
+ "base": "Describes updates to the execution property groups for a Managed Service for Apache Flink application or a Studio notebook.
",
"refs": {
- "ApplicationConfigurationUpdate$EnvironmentPropertyUpdates": "Describes updates to the environment properties for a Flink-based Kinesis Data Analytics application.
"
+ "ApplicationConfigurationUpdate$EnvironmentPropertyUpdates": "Describes updates to the environment properties for a Managed Service for Apache Flink application.
"
}
},
"ErrorMessage": {
@@ -781,47 +781,47 @@
}
},
"FlinkApplicationConfiguration": {
- "base": "Describes configuration parameters for a Flink-based Kinesis Data Analytics application or a Studio notebook.
",
+ "base": "Describes configuration parameters for a Managed Service for Apache Flink application or a Studio notebook.
",
"refs": {
- "ApplicationConfiguration$FlinkApplicationConfiguration": "The creation and update parameters for a Flink-based Kinesis Data Analytics application.
"
+ "ApplicationConfiguration$FlinkApplicationConfiguration": "The creation and update parameters for a Managed Service for Apache Flink application.
"
}
},
"FlinkApplicationConfigurationDescription": {
- "base": "Describes configuration parameters for a Flink-based Kinesis Data Analytics application.
",
+ "base": "Describes configuration parameters for a Managed Service for Apache Flink application.
",
"refs": {
- "ApplicationConfigurationDescription$FlinkApplicationConfigurationDescription": "The details about a Flink-based Kinesis Data Analytics application.
"
+ "ApplicationConfigurationDescription$FlinkApplicationConfigurationDescription": "The details about a Managed Service for Apache Flink application.
"
}
},
"FlinkApplicationConfigurationUpdate": {
- "base": "Describes updates to the configuration parameters for a Flink-based Kinesis Data Analytics application.
",
+ "base": "Describes updates to the configuration parameters for a Managed Service for Apache Flink application.
",
"refs": {
- "ApplicationConfigurationUpdate$FlinkApplicationConfigurationUpdate": "Describes updates to a Flink-based Kinesis Data Analytics application's configuration.
"
+ "ApplicationConfigurationUpdate$FlinkApplicationConfigurationUpdate": "Describes updates to a Managed Service for Apache Flink application's configuration.
"
}
},
"FlinkRunConfiguration": {
- "base": "Describes the starting parameters for a Flink-based Kinesis Data Analytics application.
",
+ "base": "Describes the starting parameters for a Managed Service for Apache Flink application.
",
"refs": {
- "RunConfiguration$FlinkRunConfiguration": "Describes the starting parameters for a Flink-based Kinesis Data Analytics application.
",
+ "RunConfiguration$FlinkRunConfiguration": "Describes the starting parameters for a Managed Service for Apache Flink application.
",
"RunConfigurationDescription$FlinkRunConfigurationDescription": null,
- "RunConfigurationUpdate$FlinkRunConfiguration": "Describes the starting parameters for a Flink-based Kinesis Data Analytics application.
"
+ "RunConfigurationUpdate$FlinkRunConfiguration": "Describes the starting parameters for a Managed Service for Apache Flink application.
"
}
},
"GlueDataCatalogConfiguration": {
"base": "The configuration of the Glue Data Catalog that you use for Apache Flink SQL queries and table API transforms that you write in an application.
",
"refs": {
- "CatalogConfiguration$GlueDataCatalogConfiguration": "The configuration parameters for the default Amazon Glue database. You use this database for Apache Flink SQL queries and table API transforms that you write in a Kinesis Data Analytics Studio notebook.
"
+ "CatalogConfiguration$GlueDataCatalogConfiguration": "The configuration parameters for the default Amazon Glue database. You use this database for Apache Flink SQL queries and table API transforms that you write in a Managed Service for Apache Flink Studio notebook.
"
}
},
"GlueDataCatalogConfigurationDescription": {
"base": "The configuration of the Glue Data Catalog that you use for Apache Flink SQL queries and table API transforms that you write in an application.
",
"refs": {
- "CatalogConfigurationDescription$GlueDataCatalogConfigurationDescription": "The configuration parameters for the default Amazon Glue database. You use this database for SQL queries that you write in a Kinesis Data Analytics Studio notebook.
"
+ "CatalogConfigurationDescription$GlueDataCatalogConfigurationDescription": "The configuration parameters for the default Amazon Glue database. You use this database for SQL queries that you write in a Managed Service for Apache Flink Studio notebook.
"
}
},
"GlueDataCatalogConfigurationUpdate": {
- "base": "Updates to the configuration of the Glue Data Catalog that you use for SQL queries that you write in a Kinesis Data Analytics Studio notebook.
",
+ "base": "Updates to the configuration of the Glue Data Catalog that you use for SQL queries that you write in a Managed Service for Apache Flink Studio notebook.
",
"refs": {
- "CatalogConfigurationUpdate$GlueDataCatalogConfigurationUpdate": "Updates to the configuration parameters for the default Amazon Glue database. You use this database for SQL queries that you write in a Kinesis Data Analytics Studio notebook.
"
+ "CatalogConfigurationUpdate$GlueDataCatalogConfigurationUpdate": "Updates to the configuration parameters for the default Amazon Glue database. You use this database for SQL queries that you write in a Managed Service for Apache Flink Studio notebook.
"
}
},
"Id": {
@@ -968,7 +968,7 @@
"InputStartingPositionConfiguration": {
"base": "Describes the point at which the application reads from the streaming source.
",
"refs": {
- "DiscoverInputSchemaRequest$InputStartingPositionConfiguration": "The point at which you want Kinesis Data Analytics to start reading records from the specified streaming source discovery purposes.
",
+ "DiscoverInputSchemaRequest$InputStartingPositionConfiguration": "The point at which you want Kinesis Data Analytics to start reading records from the specified streaming source for discovery purposes.
",
"InputDescription$InputStartingPositionConfiguration": "The point at which the application is configured to read from the input stream.
",
"SqlRunConfiguration$InputStartingPositionConfiguration": "The point at which you want the application to start processing records from the streaming source.
"
}
@@ -1023,7 +1023,7 @@
"refs": {
"ListTagsForResourceRequest$ResourceARN": "The ARN of the application for which to retrieve tags.
",
"TagResourceRequest$ResourceARN": "The ARN of the application to assign the tags.
",
- "UntagResourceRequest$ResourceARN": "The ARN of the Kinesis Data Analytics application from which to remove the tags.
"
+ "UntagResourceRequest$ResourceARN": "The ARN of the Managed Service for Apache Flink application from which to remove the tags.
"
}
},
"KinesisFirehoseInput": {
@@ -1187,7 +1187,7 @@
"MonitoringConfigurationUpdate$LogLevelUpdate": "Describes updates to the verbosity of the CloudWatch Logs for an application.
",
"ZeppelinMonitoringConfiguration$LogLevel": "The verbosity of the CloudWatch Logs for an application.
",
"ZeppelinMonitoringConfigurationDescription$LogLevel": "Describes the verbosity of the CloudWatch Logs for an application.
",
- "ZeppelinMonitoringConfigurationUpdate$LogLevelUpdate": "Updates to the logging level for Apache Zeppelin within a Kinesis Data Analytics Studio notebook.
"
+ "ZeppelinMonitoringConfigurationUpdate$LogLevelUpdate": "Updates to the logging level for Apache Zeppelin within a Managed Service for Apache Flink Studio notebook.
"
}
},
"LogStreamARN": {
@@ -1321,20 +1321,20 @@
"Parallelism": {
"base": null,
"refs": {
- "ParallelismConfiguration$Parallelism": "Describes the initial number of parallel tasks that a Flink-based Kinesis Data Analytics application can perform. If AutoScalingEnabled
is set to True, Kinesis Data Analytics increases the CurrentParallelism
value in response to application load. The service can increase the CurrentParallelism
value up to the maximum parallelism, which is ParalellismPerKPU
times the maximum KPUs for the application. The maximum KPUs for an application is 32 by default, and can be increased by requesting a limit increase. If application load is reduced, the service can reduce the CurrentParallelism
value down to the Parallelism
setting.
",
- "ParallelismConfigurationDescription$Parallelism": "Describes the initial number of parallel tasks that a Flink-based Kinesis Data Analytics application can perform. If AutoScalingEnabled
is set to True, then Kinesis Data Analytics can increase the CurrentParallelism
value in response to application load. The service can increase CurrentParallelism
up to the maximum parallelism, which is ParalellismPerKPU
times the maximum KPUs for the application. The maximum KPUs for an application is 32 by default, and can be increased by requesting a limit increase. If application load is reduced, the service can reduce the CurrentParallelism
value down to the Parallelism
setting.
",
- "ParallelismConfigurationDescription$CurrentParallelism": "Describes the current number of parallel tasks that a Flink-based Kinesis Data Analytics application can perform. If AutoScalingEnabled
is set to True, Kinesis Data Analytics can increase this value in response to application load. The service can increase this value up to the maximum parallelism, which is ParalellismPerKPU
times the maximum KPUs for the application. The maximum KPUs for an application is 32 by default, and can be increased by requesting a limit increase. If application load is reduced, the service can reduce the CurrentParallelism
value down to the Parallelism
setting.
",
- "ParallelismConfigurationUpdate$ParallelismUpdate": "Describes updates to the initial number of parallel tasks an application can perform. If AutoScalingEnabled
is set to True, then Kinesis Data Analytics can increase the CurrentParallelism
value in response to application load. The service can increase CurrentParallelism
up to the maximum parallelism, which is ParalellismPerKPU
times the maximum KPUs for the application. The maximum KPUs for an application is 32 by default, and can be increased by requesting a limit increase. If application load is reduced, the service will reduce CurrentParallelism
down to the Parallelism
setting.
"
+ "ParallelismConfiguration$Parallelism": "Describes the initial number of parallel tasks that a Managed Service for Apache Flink application can perform. If AutoScalingEnabled
is set to True, Managed Service for Apache Flink increases the CurrentParallelism
value in response to application load. The service can increase the CurrentParallelism
value up to the maximum parallelism, which is ParallelismPerKPU
times the maximum KPUs for the application. The maximum KPUs for an application is 32 by default, and can be increased by requesting a limit increase. If application load is reduced, the service can reduce the CurrentParallelism
value down to the Parallelism
setting.
",
+ "ParallelismConfigurationDescription$Parallelism": "Describes the initial number of parallel tasks that a Managed Service for Apache Flink application can perform. If AutoScalingEnabled
is set to True, then Managed Service for Apache Flink can increase the CurrentParallelism
value in response to application load. The service can increase CurrentParallelism
up to the maximum parallelism, which is ParallelismPerKPU
times the maximum KPUs for the application. The maximum KPUs for an application is 32 by default, and can be increased by requesting a limit increase. If application load is reduced, the service can reduce the CurrentParallelism
value down to the Parallelism
setting.
",
+ "ParallelismConfigurationDescription$CurrentParallelism": "Describes the current number of parallel tasks that a Managed Service for Apache Flink application can perform. If AutoScalingEnabled
is set to True, Managed Service for Apache Flink can increase this value in response to application load. The service can increase this value up to the maximum parallelism, which is ParallelismPerKPU
times the maximum KPUs for the application. The maximum KPUs for an application is 32 by default, and can be increased by requesting a limit increase. If application load is reduced, the service can reduce the CurrentParallelism
value down to the Parallelism
setting.
",
+ "ParallelismConfigurationUpdate$ParallelismUpdate": "Describes updates to the initial number of parallel tasks an application can perform. If AutoScalingEnabled
is set to True, then Managed Service for Apache Flink can increase the CurrentParallelism
value in response to application load. The service can increase CurrentParallelism
up to the maximum parallelism, which is ParallelismPerKPU
times the maximum KPUs for the application. The maximum KPUs for an application is 32 by default, and can be increased by requesting a limit increase. If application load is reduced, the service will reduce CurrentParallelism
down to the Parallelism
setting.
"
}
},
"ParallelismConfiguration": {
- "base": "Describes parameters for how a Flink-based Kinesis Data Analytics application executes multiple tasks simultaneously. For more information about parallelism, see Parallel Execution in the Apache Flink Documentation.
",
+ "base": "Describes parameters for how a Managed Service for Apache Flink application executes multiple tasks simultaneously. For more information about parallelism, see Parallel Execution in the Apache Flink Documentation.
",
"refs": {
"FlinkApplicationConfiguration$ParallelismConfiguration": "Describes parameters for how an application executes multiple tasks simultaneously.
"
}
},
"ParallelismConfigurationDescription": {
- "base": "Describes parameters for how a Flink-based Kinesis Data Analytics application executes multiple tasks simultaneously.
",
+ "base": "Describes parameters for how a Managed Service for Apache Flink application executes multiple tasks simultaneously.
",
"refs": {
"FlinkApplicationConfigurationDescription$ParallelismConfigurationDescription": "Describes parameters for how an application executes multiple tasks simultaneously.
"
}
@@ -1348,8 +1348,8 @@
"ParallelismPerKPU": {
"base": null,
"refs": {
- "ParallelismConfiguration$ParallelismPerKPU": "Describes the number of parallel tasks that a Flink-based Kinesis Data Analytics application can perform per Kinesis Processing Unit (KPU) used by the application. For more information about KPUs, see Amazon Kinesis Data Analytics Pricing.
",
- "ParallelismConfigurationDescription$ParallelismPerKPU": "Describes the number of parallel tasks that a Flink-based Kinesis Data Analytics application can perform per Kinesis Processing Unit (KPU) used by the application.
",
+ "ParallelismConfiguration$ParallelismPerKPU": "Describes the number of parallel tasks that a Managed Service for Apache Flink application can perform per Kinesis Processing Unit (KPU) used by the application. For more information about KPUs, see Amazon Managed Service for Apache Flink Pricing.
",
+ "ParallelismConfigurationDescription$ParallelismPerKPU": "Describes the number of parallel tasks that a Managed Service for Apache Flink application can perform per Kinesis Processing Unit (KPU) used by the application.
",
"ParallelismConfigurationUpdate$ParallelismPerKPUUpdate": "Describes updates to the number of parallel tasks an application can perform per Kinesis Processing Unit (KPU) used by the application.
"
}
},
@@ -1552,7 +1552,7 @@
"DeleteApplicationInputProcessingConfigurationResponse$ApplicationARN": "The Amazon Resource Name (ARN) of the application.
",
"DeleteApplicationOutputResponse$ApplicationARN": "The application Amazon Resource Name (ARN).
",
"DeleteApplicationReferenceDataSourceResponse$ApplicationARN": "The application Amazon Resource Name (ARN).
",
- "DeleteApplicationVpcConfigurationResponse$ApplicationARN": "The ARN of the Kinesis Data Analytics application.
",
+ "DeleteApplicationVpcConfigurationResponse$ApplicationARN": "The ARN of the Managed Service for Apache Flink application.
",
"DiscoverInputSchemaRequest$ResourceARN": "The Amazon Resource Name (ARN) of the streaming source.
",
"InputLambdaProcessor$ResourceARN": "The ARN of the Amazon Lambda function that operates on records in the stream.
To specify an earlier version of the Lambda function than the latest, include the Lambda function version in the Lambda function ARN. For more information about Lambda ARNs, see Example ARNs: Amazon Lambda
",
"InputLambdaProcessorDescription$ResourceARN": "The ARN of the Amazon Lambda function that is used to preprocess the records in the stream.
To specify an earlier version of the Lambda function than the latest, include the Lambda function version in the Lambda function ARN. For more information about Lambda ARNs, see Example ARNs: Amazon Lambda
",
@@ -1618,19 +1618,19 @@
}
},
"RunConfiguration": {
- "base": "Describes the starting parameters for an Kinesis Data Analytics application.
",
+      "base": "Describes the starting parameters for a Managed Service for Apache Flink application.
",
"refs": {
- "StartApplicationRequest$RunConfiguration": "Identifies the run configuration (start parameters) of a Kinesis Data Analytics application.
"
+ "StartApplicationRequest$RunConfiguration": "Identifies the run configuration (start parameters) of a Managed Service for Apache Flink application.
"
}
},
"RunConfigurationDescription": {
- "base": "Describes the starting properties for a Kinesis Data Analytics application.
",
+ "base": "Describes the starting properties for a Managed Service for Apache Flink application.
",
"refs": {
- "ApplicationConfigurationDescription$RunConfigurationDescription": "The details about the starting properties for a Kinesis Data Analytics application.
"
+ "ApplicationConfigurationDescription$RunConfigurationDescription": "The details about the starting properties for a Managed Service for Apache Flink application.
"
}
},
"RunConfigurationUpdate": {
- "base": "Describes the updates to the starting parameters for a Kinesis Data Analytics application.
",
+ "base": "Describes the updates to the starting parameters for a Managed Service for Apache Flink application.
",
"refs": {
"UpdateApplicationRequest$RunConfigurationUpdate": "Describes updates to the application's starting parameters.
"
}
@@ -1640,7 +1640,9 @@
"refs": {
"ApplicationDetail$RuntimeEnvironment": "The runtime environment for the application.
",
"ApplicationSummary$RuntimeEnvironment": "The runtime environment for the application.
",
- "CreateApplicationRequest$RuntimeEnvironment": "The runtime environment for the application.
"
+ "CreateApplicationRequest$RuntimeEnvironment": "The runtime environment for the application.
",
+ "SnapshotDetails$RuntimeEnvironment": "The Flink Runtime for the application snapshot.
",
+ "UpdateApplicationRequest$RuntimeEnvironmentUpdate": "Updates the Managed Service for Apache Flink runtime environment used to run your code. To avoid issues you must:
"
}
},
"S3ApplicationCodeLocationDescription": {
@@ -1674,7 +1676,7 @@
}
},
"S3ContentLocation": {
- "base": "For a Kinesis Data Analytics application provides a description of an Amazon S3 object, including the Amazon Resource Name (ARN) of the S3 bucket, the name of the Amazon S3 object that contains the data, and the version number of the Amazon S3 object that contains the data.
",
+ "base": "For a Managed Service for Apache Flink application provides a description of an Amazon S3 object, including the Amazon Resource Name (ARN) of the S3 bucket, the name of the Amazon S3 object that contains the data, and the version number of the Amazon S3 object that contains the data.
",
"refs": {
"CodeContent$S3ContentLocation": "Information about the Amazon S3 bucket that contains the application code.
",
"CustomArtifactConfiguration$S3ContentLocation": null,
@@ -1688,9 +1690,9 @@
}
},
"S3ReferenceDataSource": {
- "base": "For a SQL-based Kinesis Data Analytics application, identifies the Amazon S3 bucket and object that contains the reference data.
A Kinesis Data Analytics application loads reference data only once. If the data changes, you call the UpdateApplication operation to trigger reloading of data into your application.
",
+ "base": "For a SQL-based Kinesis Data Analytics application, identifies the Amazon S3 bucket and object that contains the reference data.
A SQL-based Kinesis Data Analytics application loads reference data only once. If the data changes, you call the UpdateApplication operation to trigger reloading of data into your application.
",
"refs": {
- "ReferenceDataSource$S3ReferenceDataSource": "Identifies the S3 bucket and object that contains the reference data. A Kinesis Data Analytics application loads reference data only once. If the data changes, you call the UpdateApplication operation to trigger reloading of data into your application.
"
+ "ReferenceDataSource$S3ReferenceDataSource": "Identifies the S3 bucket and object that contains the reference data. A SQL-based Kinesis Data Analytics application loads reference data only once. If the data changes, you call the UpdateApplication operation to trigger reloading of data into your application.
"
}
},
"S3ReferenceDataSourceDescription": {
@@ -1880,7 +1882,7 @@
"TextContent": {
"base": null,
"refs": {
- "CodeContent$TextContent": "The text-format code for a Flink-based Kinesis Data Analytics application.
",
+ "CodeContent$TextContent": "The text-format code for a Managed Service for Apache Flink application.
",
"CodeContentDescription$TextContent": "The text-format code
",
"CodeContentUpdate$TextContentUpdate": "Describes an update to the text code for an application.
"
}
@@ -1991,45 +1993,45 @@
}
},
"ZeppelinApplicationConfiguration": {
- "base": "The configuration of a Kinesis Data Analytics Studio notebook.
",
+ "base": "The configuration of a Managed Service for Apache Flink Studio notebook.
",
"refs": {
- "ApplicationConfiguration$ZeppelinApplicationConfiguration": "The configuration parameters for a Kinesis Data Analytics Studio notebook.
"
+ "ApplicationConfiguration$ZeppelinApplicationConfiguration": "The configuration parameters for a Managed Service for Apache Flink Studio notebook.
"
}
},
"ZeppelinApplicationConfigurationDescription": {
- "base": "The configuration of a Kinesis Data Analytics Studio notebook.
",
+ "base": "The configuration of a Managed Service for Apache Flink Studio notebook.
",
"refs": {
- "ApplicationConfigurationDescription$ZeppelinApplicationConfigurationDescription": "The configuration parameters for a Kinesis Data Analytics Studio notebook.
"
+ "ApplicationConfigurationDescription$ZeppelinApplicationConfigurationDescription": "The configuration parameters for a Managed Service for Apache Flink Studio notebook.
"
}
},
"ZeppelinApplicationConfigurationUpdate": {
- "base": "Updates to the configuration of Kinesis Data Analytics Studio notebook.
",
+      "base": "Updates to the configuration of a Managed Service for Apache Flink Studio notebook.
",
"refs": {
- "ApplicationConfigurationUpdate$ZeppelinApplicationConfigurationUpdate": "Updates to the configuration of a Kinesis Data Analytics Studio notebook.
"
+ "ApplicationConfigurationUpdate$ZeppelinApplicationConfigurationUpdate": "Updates to the configuration of a Managed Service for Apache Flink Studio notebook.
"
}
},
"ZeppelinMonitoringConfiguration": {
- "base": "Describes configuration parameters for Amazon CloudWatch logging for a Kinesis Data Analytics Studio notebook. For more information about CloudWatch logging, see Monitoring.
",
+ "base": "Describes configuration parameters for Amazon CloudWatch logging for a Managed Service for Apache Flink Studio notebook. For more information about CloudWatch logging, see Monitoring.
",
"refs": {
- "ZeppelinApplicationConfiguration$MonitoringConfiguration": "The monitoring configuration of a Kinesis Data Analytics Studio notebook.
"
+ "ZeppelinApplicationConfiguration$MonitoringConfiguration": "The monitoring configuration of a Managed Service for Apache Flink Studio notebook.
"
}
},
"ZeppelinMonitoringConfigurationDescription": {
- "base": "The monitoring configuration for Apache Zeppelin within a Kinesis Data Analytics Studio notebook.
",
+ "base": "The monitoring configuration for Apache Zeppelin within a Managed Service for Apache Flink Studio notebook.
",
"refs": {
- "ZeppelinApplicationConfigurationDescription$MonitoringConfigurationDescription": "The monitoring configuration of a Kinesis Data Analytics Studio notebook.
"
+ "ZeppelinApplicationConfigurationDescription$MonitoringConfigurationDescription": "The monitoring configuration of a Managed Service for Apache Flink Studio notebook.
"
}
},
"ZeppelinMonitoringConfigurationUpdate": {
- "base": "Updates to the monitoring configuration for Apache Zeppelin within a Kinesis Data Analytics Studio notebook.
",
+ "base": "Updates to the monitoring configuration for Apache Zeppelin within a Managed Service for Apache Flink Studio notebook.
",
"refs": {
- "ZeppelinApplicationConfigurationUpdate$MonitoringConfigurationUpdate": "Updates to the monitoring configuration of a Kinesis Data Analytics Studio notebook.
"
+ "ZeppelinApplicationConfigurationUpdate$MonitoringConfigurationUpdate": "Updates to the monitoring configuration of a Managed Service for Apache Flink Studio notebook.
"
}
},
"ZipFileContent": {
"base": null,
"refs": {
- "CodeContent$ZipFileContent": "The zip-format code for a Flink-based Kinesis Data Analytics application.
",
+ "CodeContent$ZipFileContent": "The zip-format code for a Managed Service for Apache Flink application.
",
"CodeContentUpdate$ZipFileContentUpdate": "Describes an update to the zipped code for an application.
"
}
}
diff --git a/models/apis/kinesisanalyticsv2/2018-05-23/endpoint-rule-set-1.json b/models/apis/kinesisanalyticsv2/2018-05-23/endpoint-rule-set-1.json
index 8e82638e612..ba525e4e222 100644
--- a/models/apis/kinesisanalyticsv2/2018-05-23/endpoint-rule-set-1.json
+++ b/models/apis/kinesisanalyticsv2/2018-05-23/endpoint-rule-set-1.json
@@ -40,7 +40,6 @@
]
}
],
- "type": "tree",
"rules": [
{
"conditions": [
@@ -58,293 +57,258 @@
"type": "error"
},
{
- "conditions": [],
- "type": "tree",
- "rules": [
+ "conditions": [
{
- "conditions": [
+ "fn": "booleanEquals",
+ "argv": [
{
- "fn": "booleanEquals",
- "argv": [
- {
- "ref": "UseDualStack"
- },
- true
- ]
- }
- ],
- "error": "Invalid Configuration: Dualstack and custom endpoint are not supported",
- "type": "error"
- },
- {
- "conditions": [],
- "endpoint": {
- "url": {
- "ref": "Endpoint"
+ "ref": "UseDualStack"
},
- "properties": {},
- "headers": {}
- },
- "type": "endpoint"
+ true
+ ]
}
- ]
+ ],
+ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported",
+ "type": "error"
+ },
+ {
+ "conditions": [],
+ "endpoint": {
+ "url": {
+ "ref": "Endpoint"
+ },
+ "properties": {},
+ "headers": {}
+ },
+ "type": "endpoint"
}
- ]
+ ],
+ "type": "tree"
},
{
- "conditions": [],
- "type": "tree",
+ "conditions": [
+ {
+ "fn": "isSet",
+ "argv": [
+ {
+ "ref": "Region"
+ }
+ ]
+ }
+ ],
"rules": [
{
"conditions": [
{
- "fn": "isSet",
+ "fn": "aws.partition",
"argv": [
{
"ref": "Region"
}
- ]
+ ],
+ "assign": "PartitionResult"
}
],
- "type": "tree",
"rules": [
{
"conditions": [
{
- "fn": "aws.partition",
+ "fn": "booleanEquals",
"argv": [
{
- "ref": "Region"
- }
- ],
- "assign": "PartitionResult"
+ "ref": "UseFIPS"
+ },
+ true
+ ]
+ },
+ {
+ "fn": "booleanEquals",
+ "argv": [
+ {
+ "ref": "UseDualStack"
+ },
+ true
+ ]
}
],
- "type": "tree",
"rules": [
{
"conditions": [
{
"fn": "booleanEquals",
"argv": [
+ true,
{
- "ref": "UseFIPS"
- },
- true
+ "fn": "getAttr",
+ "argv": [
+ {
+ "ref": "PartitionResult"
+ },
+ "supportsFIPS"
+ ]
+ }
]
},
{
"fn": "booleanEquals",
"argv": [
+ true,
{
- "ref": "UseDualStack"
- },
- true
- ]
- }
- ],
- "type": "tree",
- "rules": [
- {
- "conditions": [
- {
- "fn": "booleanEquals",
+ "fn": "getAttr",
"argv": [
- true,
{
- "fn": "getAttr",
- "argv": [
- {
- "ref": "PartitionResult"
- },
- "supportsFIPS"
- ]
- }
- ]
- },
- {
- "fn": "booleanEquals",
- "argv": [
- true,
- {
- "fn": "getAttr",
- "argv": [
- {
- "ref": "PartitionResult"
- },
- "supportsDualStack"
- ]
- }
- ]
- }
- ],
- "type": "tree",
- "rules": [
- {
- "conditions": [],
- "type": "tree",
- "rules": [
- {
- "conditions": [],
- "endpoint": {
- "url": "https://kinesisanalytics-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",
- "properties": {},
- "headers": {}
- },
- "type": "endpoint"
- }
+ "ref": "PartitionResult"
+ },
+ "supportsDualStack"
]
}
]
- },
+ }
+ ],
+ "rules": [
{
"conditions": [],
- "error": "FIPS and DualStack are enabled, but this partition does not support one or both",
- "type": "error"
+ "endpoint": {
+ "url": "https://kinesisanalytics-fips.{Region}.{PartitionResult#dualStackDnsSuffix}",
+ "properties": {},
+ "headers": {}
+ },
+ "type": "endpoint"
}
- ]
+ ],
+ "type": "tree"
},
+ {
+ "conditions": [],
+ "error": "FIPS and DualStack are enabled, but this partition does not support one or both",
+ "type": "error"
+ }
+ ],
+ "type": "tree"
+ },
+ {
+ "conditions": [
+ {
+ "fn": "booleanEquals",
+ "argv": [
+ {
+ "ref": "UseFIPS"
+ },
+ true
+ ]
+ }
+ ],
+ "rules": [
{
"conditions": [
{
"fn": "booleanEquals",
"argv": [
{
- "ref": "UseFIPS"
+ "fn": "getAttr",
+ "argv": [
+ {
+ "ref": "PartitionResult"
+ },
+ "supportsFIPS"
+ ]
},
true
]
}
],
- "type": "tree",
"rules": [
- {
- "conditions": [
- {
- "fn": "booleanEquals",
- "argv": [
- true,
- {
- "fn": "getAttr",
- "argv": [
- {
- "ref": "PartitionResult"
- },
- "supportsFIPS"
- ]
- }
- ]
- }
- ],
- "type": "tree",
- "rules": [
- {
- "conditions": [],
- "type": "tree",
- "rules": [
- {
- "conditions": [],
- "endpoint": {
- "url": "https://kinesisanalytics-fips.{Region}.{PartitionResult#dnsSuffix}",
- "properties": {},
- "headers": {}
- },
- "type": "endpoint"
- }
- ]
- }
- ]
- },
{
"conditions": [],
- "error": "FIPS is enabled but this partition does not support FIPS",
- "type": "error"
+ "endpoint": {
+ "url": "https://kinesisanalytics-fips.{Region}.{PartitionResult#dnsSuffix}",
+ "properties": {},
+ "headers": {}
+ },
+ "type": "endpoint"
}
- ]
+ ],
+ "type": "tree"
},
+ {
+ "conditions": [],
+ "error": "FIPS is enabled but this partition does not support FIPS",
+ "type": "error"
+ }
+ ],
+ "type": "tree"
+ },
+ {
+ "conditions": [
+ {
+ "fn": "booleanEquals",
+ "argv": [
+ {
+ "ref": "UseDualStack"
+ },
+ true
+ ]
+ }
+ ],
+ "rules": [
{
"conditions": [
{
"fn": "booleanEquals",
"argv": [
+ true,
{
- "ref": "UseDualStack"
- },
- true
- ]
- }
- ],
- "type": "tree",
- "rules": [
- {
- "conditions": [
- {
- "fn": "booleanEquals",
+ "fn": "getAttr",
"argv": [
- true,
{
- "fn": "getAttr",
- "argv": [
- {
- "ref": "PartitionResult"
- },
- "supportsDualStack"
- ]
- }
- ]
- }
- ],
- "type": "tree",
- "rules": [
- {
- "conditions": [],
- "type": "tree",
- "rules": [
- {
- "conditions": [],
- "endpoint": {
- "url": "https://kinesisanalytics.{Region}.{PartitionResult#dualStackDnsSuffix}",
- "properties": {},
- "headers": {}
- },
- "type": "endpoint"
- }
+ "ref": "PartitionResult"
+ },
+ "supportsDualStack"
]
}
]
- },
- {
- "conditions": [],
- "error": "DualStack is enabled but this partition does not support DualStack",
- "type": "error"
}
- ]
- },
- {
- "conditions": [],
- "type": "tree",
+ ],
"rules": [
{
"conditions": [],
"endpoint": {
- "url": "https://kinesisanalytics.{Region}.{PartitionResult#dnsSuffix}",
+ "url": "https://kinesisanalytics.{Region}.{PartitionResult#dualStackDnsSuffix}",
"properties": {},
"headers": {}
},
"type": "endpoint"
}
- ]
+ ],
+ "type": "tree"
+ },
+ {
+ "conditions": [],
+ "error": "DualStack is enabled but this partition does not support DualStack",
+ "type": "error"
}
- ]
+ ],
+ "type": "tree"
+ },
+ {
+ "conditions": [],
+ "endpoint": {
+ "url": "https://kinesisanalytics.{Region}.{PartitionResult#dnsSuffix}",
+ "properties": {},
+ "headers": {}
+ },
+ "type": "endpoint"
}
- ]
- },
- {
- "conditions": [],
- "error": "Invalid Configuration: Missing Region",
- "type": "error"
+ ],
+ "type": "tree"
}
- ]
+ ],
+ "type": "tree"
+ },
+ {
+ "conditions": [],
+ "error": "Invalid Configuration: Missing Region",
+ "type": "error"
}
]
}
\ No newline at end of file
diff --git a/models/apis/s3/2006-03-01/endpoint-rule-set-1.json b/models/apis/s3/2006-03-01/endpoint-rule-set-1.json
index d1bf31a5981..66392c565c4 100644
--- a/models/apis/s3/2006-03-01/endpoint-rule-set-1.json
+++ b/models/apis/s3/2006-03-01/endpoint-rule-set-1.json
@@ -1340,6 +1340,14 @@
"url": "https://{Bucket}.ec2.{url#authority}",
"properties": {
"authSchemes": [
+ {
+ "disableDoubleEncoding": true,
+ "name": "sigv4a",
+ "signingName": "s3-outposts",
+ "signingRegionSet": [
+ "*"
+ ]
+ },
{
"disableDoubleEncoding": true,
"name": "sigv4",
@@ -1361,6 +1369,14 @@
"url": "https://{Bucket}.ec2.s3-outposts.{Region}.{regionPartition#dnsSuffix}",
"properties": {
"authSchemes": [
+ {
+ "disableDoubleEncoding": true,
+ "name": "sigv4a",
+ "signingName": "s3-outposts",
+ "signingRegionSet": [
+ "*"
+ ]
+ },
{
"disableDoubleEncoding": true,
"name": "sigv4",
@@ -1445,6 +1461,14 @@
"url": "https://{Bucket}.op-{outpostId}.{url#authority}",
"properties": {
"authSchemes": [
+ {
+ "disableDoubleEncoding": true,
+ "name": "sigv4a",
+ "signingName": "s3-outposts",
+ "signingRegionSet": [
+ "*"
+ ]
+ },
{
"disableDoubleEncoding": true,
"name": "sigv4",
@@ -1466,6 +1490,14 @@
"url": "https://{Bucket}.op-{outpostId}.s3-outposts.{Region}.{regionPartition#dnsSuffix}",
"properties": {
"authSchemes": [
+ {
+ "disableDoubleEncoding": true,
+ "name": "sigv4a",
+ "signingName": "s3-outposts",
+ "signingRegionSet": [
+ "*"
+ ]
+ },
{
"disableDoubleEncoding": true,
"name": "sigv4",
@@ -5512,6 +5544,14 @@
"url": "https://{accessPointName}-{bucketArn#accountId}.{outpostId}.{url#authority}",
"properties": {
"authSchemes": [
+ {
+ "disableDoubleEncoding": true,
+ "name": "sigv4a",
+ "signingName": "s3-outposts",
+ "signingRegionSet": [
+ "*"
+ ]
+ },
{
"disableDoubleEncoding": true,
"name": "sigv4",
@@ -5530,6 +5570,14 @@
"url": "https://{accessPointName}-{bucketArn#accountId}.{outpostId}.s3-outposts.{bucketArn#region}.{bucketPartition#dnsSuffix}",
"properties": {
"authSchemes": [
+ {
+ "disableDoubleEncoding": true,
+ "name": "sigv4a",
+ "signingName": "s3-outposts",
+ "signingRegionSet": [
+ "*"
+ ]
+ },
{
"disableDoubleEncoding": true,
"name": "sigv4",
diff --git a/models/apis/s3/2006-03-01/endpoint-tests-1.json b/models/apis/s3/2006-03-01/endpoint-tests-1.json
index ab313b9d9c8..da5ccda22e5 100644
--- a/models/apis/s3/2006-03-01/endpoint-tests-1.json
+++ b/models/apis/s3/2006-03-01/endpoint-tests-1.json
@@ -2543,6 +2543,14 @@
"endpoint": {
"properties": {
"authSchemes": [
+ {
+ "name": "sigv4a",
+ "signingName": "s3-outposts",
+ "signingRegionSet": [
+ "*"
+ ],
+ "disableDoubleEncoding": true
+ },
{
"name": "sigv4",
"signingName": "s3-outposts",
@@ -6189,6 +6197,14 @@
"endpoint": {
"properties": {
"authSchemes": [
+ {
+ "name": "sigv4a",
+ "signingName": "s3-outposts",
+ "signingRegionSet": [
+ "*"
+ ],
+ "disableDoubleEncoding": true
+ },
{
"name": "sigv4",
"signingName": "s3-outposts",
@@ -6226,6 +6242,14 @@
"endpoint": {
"properties": {
"authSchemes": [
+ {
+ "name": "sigv4a",
+ "signingName": "s3-outposts",
+ "signingRegionSet": [
+ "*"
+ ],
+ "disableDoubleEncoding": true
+ },
{
"name": "sigv4",
"signingName": "s3-outposts",
@@ -6323,6 +6347,14 @@
"endpoint": {
"properties": {
"authSchemes": [
+ {
+ "name": "sigv4a",
+ "signingName": "s3-outposts",
+ "signingRegionSet": [
+ "*"
+ ],
+ "disableDoubleEncoding": true
+ },
{
"name": "sigv4",
"signingName": "s3-outposts",
@@ -6363,6 +6395,14 @@
"endpoint": {
"properties": {
"authSchemes": [
+ {
+ "name": "sigv4a",
+ "signingName": "s3-outposts",
+ "signingRegionSet": [
+ "*"
+ ],
+ "disableDoubleEncoding": true
+ },
{
"name": "sigv4",
"signingName": "s3-outposts",
@@ -6429,6 +6469,14 @@
"endpoint": {
"properties": {
"authSchemes": [
+ {
+ "name": "sigv4a",
+ "signingName": "s3-outposts",
+ "signingRegionSet": [
+ "*"
+ ],
+ "disableDoubleEncoding": true
+ },
{
"name": "sigv4",
"signingName": "s3-outposts",
@@ -7419,6 +7467,14 @@
"endpoint": {
"properties": {
"authSchemes": [
+ {
+ "name": "sigv4a",
+ "signingName": "s3-outposts",
+ "signingRegionSet": [
+ "*"
+ ],
+ "disableDoubleEncoding": true
+ },
{
"name": "sigv4",
"signingName": "s3-outposts",
@@ -7444,6 +7500,14 @@
"endpoint": {
"properties": {
"authSchemes": [
+ {
+ "name": "sigv4a",
+ "signingName": "s3-outposts",
+ "signingRegionSet": [
+ "*"
+ ],
+ "disableDoubleEncoding": true
+ },
{
"name": "sigv4",
"signingName": "s3-outposts",
@@ -7469,6 +7533,14 @@
"endpoint": {
"properties": {
"authSchemes": [
+ {
+ "name": "sigv4a",
+ "signingName": "s3-outposts",
+ "signingRegionSet": [
+ "*"
+ ],
+ "disableDoubleEncoding": true
+ },
{
"name": "sigv4",
"signingName": "s3-outposts",
@@ -7494,6 +7566,14 @@
"endpoint": {
"properties": {
"authSchemes": [
+ {
+ "name": "sigv4a",
+ "signingName": "s3-outposts",
+ "signingRegionSet": [
+ "*"
+ ],
+ "disableDoubleEncoding": true
+ },
{
"name": "sigv4",
"signingName": "s3-outposts",
@@ -7519,6 +7599,14 @@
"endpoint": {
"properties": {
"authSchemes": [
+ {
+ "name": "sigv4a",
+ "signingName": "s3-outposts",
+ "signingRegionSet": [
+ "*"
+ ],
+ "disableDoubleEncoding": true
+ },
{
"name": "sigv4",
"signingName": "s3-outposts",
@@ -7545,6 +7633,14 @@
"endpoint": {
"properties": {
"authSchemes": [
+ {
+ "name": "sigv4a",
+ "signingName": "s3-outposts",
+ "signingRegionSet": [
+ "*"
+ ],
+ "disableDoubleEncoding": true
+ },
{
"name": "sigv4",
"signingName": "s3-outposts",
diff --git a/models/apis/s3/2006-03-01/examples-1.json b/models/apis/s3/2006-03-01/examples-1.json
index 2a15cc22002..d4cbbd175a9 100644
--- a/models/apis/s3/2006-03-01/examples-1.json
+++ b/models/apis/s3/2006-03-01/examples-1.json
@@ -84,10 +84,13 @@
"CreateBucket": [
{
"input": {
- "Bucket": "examplebucket"
+ "Bucket": "examplebucket",
+ "CreateBucketConfiguration": {
+ "LocationConstraint": "eu-west-1"
+ }
},
"output": {
- "Location": "/examplebucket"
+ "Location": "http://examplebucket..s3.amazonaws.com/"
},
"comments": {
"input": {
@@ -95,19 +98,16 @@
"output": {
}
},
- "description": "The following example creates a bucket.",
- "id": "to-create-a-bucket--1472851826060",
- "title": "To create a bucket "
+ "description": "The following example creates a bucket. The request specifies an AWS region where to create the bucket.",
+ "id": "to-create-a-bucket-in-a-specific-region-1483399072992",
+ "title": "To create a bucket in a specific region"
},
{
"input": {
- "Bucket": "examplebucket",
- "CreateBucketConfiguration": {
- "LocationConstraint": "eu-west-1"
- }
+ "Bucket": "examplebucket"
},
"output": {
- "Location": "http://examplebucket..s3.amazonaws.com/"
+ "Location": "/examplebucket"
},
"comments": {
"input": {
@@ -115,9 +115,9 @@
"output": {
}
},
- "description": "The following example creates a bucket. The request specifies an AWS region where to create the bucket.",
- "id": "to-create-a-bucket-in-a-specific-region-1483399072992",
- "title": "To create a bucket in a specific region"
+ "description": "The following example creates a bucket.",
+ "id": "to-create-a-bucket--1472851826060",
+ "title": "To create a bucket "
}
],
"CreateMultipartUpload": [
@@ -292,11 +292,10 @@
{
"input": {
"Bucket": "examplebucket",
- "Key": "HappyFace.jpg",
- "VersionId": "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI"
+ "Key": "HappyFace.jpg"
},
"output": {
- "VersionId": "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI"
+ "VersionId": "null"
},
"comments": {
"input": {
@@ -304,17 +303,18 @@
"output": {
}
},
- "description": "The following example removes tag set associated with the specified object version. The request specifies both the object key and object version.",
- "id": "to-remove-tag-set-from-an-object-version-1483145285913",
- "title": "To remove tag set from an object version"
+ "description": "The following example removes tag set associated with the specified object. If the bucket is versioning enabled, the operation removes tag set from the latest object version.",
+ "id": "to-remove-tag-set-from-an-object-1483145342862",
+ "title": "To remove tag set from an object"
},
{
"input": {
"Bucket": "examplebucket",
- "Key": "HappyFace.jpg"
+ "Key": "HappyFace.jpg",
+ "VersionId": "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI"
},
"output": {
- "VersionId": "null"
+ "VersionId": "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI"
},
"comments": {
"input": {
@@ -322,9 +322,9 @@
"output": {
}
},
- "description": "The following example removes tag set associated with the specified object. If the bucket is versioning enabled, the operation removes tag set from the latest object version.",
- "id": "to-remove-tag-set-from-an-object-1483145342862",
- "title": "To remove tag set from an object"
+ "description": "The following example removes tag set associated with the specified object version. The request specifies both the object key and object version.",
+ "id": "to-remove-tag-set-from-an-object-version-1483145285913",
+ "title": "To remove tag set from an object version"
}
],
"DeleteObjects": [
@@ -334,12 +334,10 @@
"Delete": {
"Objects": [
{
- "Key": "HappyFace.jpg",
- "VersionId": "2LWg7lQLnY41.maGB5Z6SWW.dcq0vx7b"
+ "Key": "objectkey1"
},
{
- "Key": "HappyFace.jpg",
- "VersionId": "yoz3HB.ZhCS_tKVEmIOr7qYyyAaZSKVd"
+ "Key": "objectkey2"
}
],
"Quiet": false
@@ -348,12 +346,14 @@
"output": {
"Deleted": [
{
- "Key": "HappyFace.jpg",
- "VersionId": "yoz3HB.ZhCS_tKVEmIOr7qYyyAaZSKVd"
+ "DeleteMarker": "true",
+ "DeleteMarkerVersionId": "A._w1z6EFiCF5uhtQMDal9JDkID9tQ7F",
+ "Key": "objectkey1"
},
{
- "Key": "HappyFace.jpg",
- "VersionId": "2LWg7lQLnY41.maGB5Z6SWW.dcq0vx7b"
+ "DeleteMarker": "true",
+ "DeleteMarkerVersionId": "iOd_ORxhkKe_e8G8_oSGxt2PjsCZKlkt",
+ "Key": "objectkey2"
}
]
},
@@ -363,9 +363,9 @@
"output": {
}
},
- "description": "The following example deletes objects from a bucket. The request specifies object versions. S3 deletes specific object versions and returns the key and versions of deleted objects in the response.",
- "id": "to-delete-multiple-object-versions-from-a-versioned-bucket-1483147087737",
- "title": "To delete multiple object versions from a versioned bucket"
+ "description": "The following example deletes objects from a bucket. The bucket is versioned, and the request does not specify the object version to delete. In this case, all versions remain in the bucket and S3 adds a delete marker.",
+ "id": "to-delete-multiple-objects-from-a-versioned-bucket-1483146248805",
+ "title": "To delete multiple objects from a versioned bucket"
},
{
"input": {
@@ -373,10 +373,12 @@
"Delete": {
"Objects": [
{
- "Key": "objectkey1"
+ "Key": "HappyFace.jpg",
+ "VersionId": "2LWg7lQLnY41.maGB5Z6SWW.dcq0vx7b"
},
{
- "Key": "objectkey2"
+ "Key": "HappyFace.jpg",
+ "VersionId": "yoz3HB.ZhCS_tKVEmIOr7qYyyAaZSKVd"
}
],
"Quiet": false
@@ -385,14 +387,12 @@
"output": {
"Deleted": [
{
- "DeleteMarker": "true",
- "DeleteMarkerVersionId": "A._w1z6EFiCF5uhtQMDal9JDkID9tQ7F",
- "Key": "objectkey1"
+ "Key": "HappyFace.jpg",
+ "VersionId": "yoz3HB.ZhCS_tKVEmIOr7qYyyAaZSKVd"
},
{
- "DeleteMarker": "true",
- "DeleteMarkerVersionId": "iOd_ORxhkKe_e8G8_oSGxt2PjsCZKlkt",
- "Key": "objectkey2"
+ "Key": "HappyFace.jpg",
+ "VersionId": "2LWg7lQLnY41.maGB5Z6SWW.dcq0vx7b"
}
]
},
@@ -402,9 +402,9 @@
"output": {
}
},
- "description": "The following example deletes objects from a bucket. The bucket is versioned, and the request does not specify the object version to delete. In this case, all versions remain in the bucket and S3 adds a delete marker.",
- "id": "to-delete-multiple-objects-from-a-versioned-bucket-1483146248805",
- "title": "To delete multiple objects from a versioned bucket"
+ "description": "The following example deletes objects from a bucket. The request specifies object versions. S3 deletes specific object versions and returns the key and versions of deleted objects in the response.",
+ "id": "to-delete-multiple-object-versions-from-a-versioned-bucket-1483147087737",
+ "title": "To delete multiple object versions from a versioned bucket"
}
],
"GetBucketCors": [
@@ -840,20 +840,17 @@
{
"input": {
"Bucket": "examplebucket",
- "Key": "HappyFace.jpg"
+ "Key": "exampleobject",
+ "VersionId": "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI"
},
"output": {
"TagSet": [
{
- "Key": "Key4",
- "Value": "Value4"
- },
- {
- "Key": "Key3",
- "Value": "Value3"
+ "Key": "Key1",
+ "Value": "Value1"
}
],
- "VersionId": "null"
+ "VersionId": "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI"
},
"comments": {
"input": {
@@ -861,24 +858,27 @@
"output": {
}
},
- "description": "The following example retrieves tag set of an object.",
- "id": "to-retrieve-tag-set-of-an-object-1481833847896",
- "title": "To retrieve tag set of an object"
+ "description": "The following example retrieves tag set of an object. The request specifies object version.",
+ "id": "to-retrieve-tag-set-of-a-specific-object-version-1483400283663",
+ "title": "To retrieve tag set of a specific object version"
},
{
"input": {
"Bucket": "examplebucket",
- "Key": "exampleobject",
- "VersionId": "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI"
+ "Key": "HappyFace.jpg"
},
"output": {
"TagSet": [
{
- "Key": "Key1",
- "Value": "Value1"
+ "Key": "Key4",
+ "Value": "Value4"
+ },
+ {
+ "Key": "Key3",
+ "Value": "Value3"
}
],
- "VersionId": "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI"
+ "VersionId": "null"
},
"comments": {
"input": {
@@ -886,9 +886,9 @@
"output": {
}
},
- "description": "The following example retrieves tag set of an object. The request specifies object version.",
- "id": "to-retrieve-tag-set-of-a-specific-object-version-1483400283663",
- "title": "To retrieve tag set of a specific object version"
+ "description": "The following example retrieves tag set of an object.",
+ "id": "to-retrieve-tag-set-of-an-object-1481833847896",
+ "title": "To retrieve tag set of an object"
}
],
"GetObjectTorrent": [
@@ -1569,14 +1569,11 @@
"input": {
"Body": "filetoupload",
"Bucket": "examplebucket",
- "Key": "exampleobject",
- "ServerSideEncryption": "AES256",
- "Tagging": "key1=value1&key2=value2"
+ "Key": "objectkey"
},
"output": {
"ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"",
- "ServerSideEncryption": "AES256",
- "VersionId": "Ri.vC6qVlA4dEnjgRV4ZHsHoFIjqEMNt"
+ "VersionId": "Bvq0EDKxOcXLJXNo_Lkz37eM3R4pfzyQ"
},
"comments": {
"input": {
@@ -1584,19 +1581,23 @@
"output": {
}
},
- "description": "The following example uploads an object. The request specifies the optional server-side encryption option. The request also specifies optional object tags. If the bucket is versioning enabled, S3 returns version ID in response.",
- "id": "to-upload-an-object-and-specify-server-side-encryption-and-object-tags-1483398331831",
- "title": "To upload an object and specify server-side encryption and object tags"
+ "description": "The following example creates an object. If the bucket is versioning enabled, S3 returns version ID in response.",
+ "id": "to-create-an-object-1483147613675",
+ "title": "To create an object."
},
{
"input": {
"Body": "filetoupload",
"Bucket": "examplebucket",
- "Key": "objectkey"
+ "Key": "exampleobject",
+ "Metadata": {
+ "metadata1": "value1",
+ "metadata2": "value2"
+ }
},
"output": {
"ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"",
- "VersionId": "Bvq0EDKxOcXLJXNo_Lkz37eM3R4pfzyQ"
+ "VersionId": "pSKidl4pHBiNwukdbcPXAIs.sshFFOc0"
},
"comments": {
"input": {
@@ -1604,22 +1605,19 @@
"output": {
}
},
- "description": "The following example creates an object. If the bucket is versioning enabled, S3 returns version ID in response.",
- "id": "to-create-an-object-1483147613675",
- "title": "To create an object."
+ "description": "The following example creates an object. The request also specifies optional metadata. If the bucket is versioning enabled, S3 returns version ID in response.",
+ "id": "to-upload-object-and-specify-user-defined-metadata-1483396974757",
+ "title": "To upload object and specify user-defined metadata"
},
{
"input": {
"Body": "HappyFace.jpg",
"Bucket": "examplebucket",
- "Key": "HappyFace.jpg",
- "ServerSideEncryption": "AES256",
- "StorageClass": "STANDARD_IA"
+ "Key": "HappyFace.jpg"
},
"output": {
"ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"",
- "ServerSideEncryption": "AES256",
- "VersionId": "CG612hodqujkf8FaaNfp8U..FIhLROcp"
+ "VersionId": "tpf3zF08nBplQK1XLOefGskR7mGDwcDk"
},
"comments": {
"input": {
@@ -1627,20 +1625,20 @@
"output": {
}
},
- "description": "The following example uploads an object. The request specifies optional request headers to directs S3 to use specific storage class and use server-side encryption.",
- "id": "to-upload-an-object-(specify-optional-headers)",
- "title": "To upload an object (specify optional headers)"
+ "description": "The following example uploads an object to a versioning-enabled bucket. The source file is specified using Windows file syntax. S3 returns VersionId of the newly created object.",
+ "id": "to-upload-an-object-1481760101010",
+ "title": "To upload an object"
},
{
"input": {
- "Body": "c:\\HappyFace.jpg",
+ "ACL": "authenticated-read",
+ "Body": "filetoupload",
"Bucket": "examplebucket",
- "Key": "HappyFace.jpg",
- "Tagging": "key1=value1&key2=value2"
+ "Key": "exampleobject"
},
"output": {
"ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"",
- "VersionId": "psM2sYY4.o1501dSx8wMvnkOzSBB.V4a"
+ "VersionId": "Kirh.unyZwjQ69YxcQLA8z4F5j3kJJKr"
},
"comments": {
"input": {
@@ -1648,23 +1646,22 @@
"output": {
}
},
- "description": "The following example uploads an object. The request specifies optional object tags. The bucket is versioned, therefore S3 returns version ID of the newly created object.",
- "id": "to-upload-an-object-and-specify-optional-tags-1481762310955",
- "title": "To upload an object and specify optional tags"
+ "description": "The following example uploads and object. The request specifies optional canned ACL (access control list) to all READ access to authenticated users. If the bucket is versioning enabled, S3 returns version ID in response.",
+ "id": "to-upload-an-object-and-specify-canned-acl-1483397779571",
+ "title": "To upload an object and specify canned ACL."
},
{
"input": {
- "Body": "filetoupload",
+ "Body": "HappyFace.jpg",
"Bucket": "examplebucket",
- "Key": "exampleobject",
- "Metadata": {
- "metadata1": "value1",
- "metadata2": "value2"
- }
+ "Key": "HappyFace.jpg",
+ "ServerSideEncryption": "AES256",
+ "StorageClass": "STANDARD_IA"
},
"output": {
"ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"",
- "VersionId": "pSKidl4pHBiNwukdbcPXAIs.sshFFOc0"
+ "ServerSideEncryption": "AES256",
+ "VersionId": "CG612hodqujkf8FaaNfp8U..FIhLROcp"
},
"comments": {
"input": {
@@ -1672,20 +1669,20 @@
"output": {
}
},
- "description": "The following example creates an object. The request also specifies optional metadata. If the bucket is versioning enabled, S3 returns version ID in response.",
- "id": "to-upload-object-and-specify-user-defined-metadata-1483396974757",
- "title": "To upload object and specify user-defined metadata"
+ "description": "The following example uploads an object. The request specifies optional request headers to directs S3 to use specific storage class and use server-side encryption.",
+ "id": "to-upload-an-object-(specify-optional-headers)",
+ "title": "To upload an object (specify optional headers)"
},
{
"input": {
- "ACL": "authenticated-read",
- "Body": "filetoupload",
+ "Body": "c:\\HappyFace.jpg",
"Bucket": "examplebucket",
- "Key": "exampleobject"
+ "Key": "HappyFace.jpg",
+ "Tagging": "key1=value1&key2=value2"
},
"output": {
"ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"",
- "VersionId": "Kirh.unyZwjQ69YxcQLA8z4F5j3kJJKr"
+ "VersionId": "psM2sYY4.o1501dSx8wMvnkOzSBB.V4a"
},
"comments": {
"input": {
@@ -1693,19 +1690,22 @@
"output": {
}
},
- "description": "The following example uploads and object. The request specifies optional canned ACL (access control list) to all READ access to authenticated users. If the bucket is versioning enabled, S3 returns version ID in response.",
- "id": "to-upload-an-object-and-specify-canned-acl-1483397779571",
- "title": "To upload an object and specify canned ACL."
+ "description": "The following example uploads an object. The request specifies optional object tags. The bucket is versioned, therefore S3 returns version ID of the newly created object.",
+ "id": "to-upload-an-object-and-specify-optional-tags-1481762310955",
+ "title": "To upload an object and specify optional tags"
},
{
"input": {
- "Body": "HappyFace.jpg",
+ "Body": "filetoupload",
"Bucket": "examplebucket",
- "Key": "HappyFace.jpg"
+ "Key": "exampleobject",
+ "ServerSideEncryption": "AES256",
+ "Tagging": "key1=value1&key2=value2"
},
"output": {
"ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"",
- "VersionId": "tpf3zF08nBplQK1XLOefGskR7mGDwcDk"
+ "ServerSideEncryption": "AES256",
+ "VersionId": "Ri.vC6qVlA4dEnjgRV4ZHsHoFIjqEMNt"
},
"comments": {
"input": {
@@ -1713,9 +1713,9 @@
"output": {
}
},
- "description": "The following example uploads an object to a versioning-enabled bucket. The source file is specified using Windows file syntax. S3 returns VersionId of the newly created object.",
- "id": "to-upload-an-object-1481760101010",
- "title": "To upload an object"
+ "description": "The following example uploads an object. The request specifies the optional server-side encryption option. The request also specifies optional object tags. If the bucket is versioning enabled, S3 returns version ID in response.",
+ "id": "to-upload-an-object-and-specify-server-side-encryption-and-object-tags-1483398331831",
+ "title": "To upload an object and specify server-side encryption and object tags"
}
],
"PutObjectAcl": [
@@ -1826,14 +1826,15 @@
"input": {
"Bucket": "examplebucket",
"CopySource": "/bucketname/sourceobjectkey",
+ "CopySourceRange": "bytes=1-100000",
"Key": "examplelargeobject",
- "PartNumber": "1",
+ "PartNumber": "2",
"UploadId": "exampleuoh_10OhKhT7YukE9bjzTPRiuaCotmZM_pFngJFir9OZNrSr5cWa3cq3LZSUsfjI4FI7PkP91We7Nrw--"
},
"output": {
"CopyPartResult": {
- "ETag": "\"b0c6f0e7e054ab8fa2536a2677f8734d\"",
- "LastModified": "2016-12-29T21:24:43.000Z"
+ "ETag": "\"65d16d19e65a7508a51f043180edcc36\"",
+ "LastModified": "2016-12-29T21:44:28.000Z"
}
},
"comments": {
@@ -1842,23 +1843,22 @@
"output": {
}
},
- "description": "The following example uploads a part of a multipart upload by copying data from an existing object as data source.",
- "id": "to-upload-a-part-by-copying-data-from-an-existing-object-as-data-source-1483046746348",
- "title": "To upload a part by copying data from an existing object as data source"
+ "description": "The following example uploads a part of a multipart upload by copying a specified byte range from an existing object as data source.",
+ "id": "to-upload-a-part-by-copying-byte-range-from-an-existing-object-as-data-source-1483048068594",
+ "title": "To upload a part by copying byte range from an existing object as data source"
},
{
"input": {
"Bucket": "examplebucket",
"CopySource": "/bucketname/sourceobjectkey",
- "CopySourceRange": "bytes=1-100000",
"Key": "examplelargeobject",
- "PartNumber": "2",
+ "PartNumber": "1",
"UploadId": "exampleuoh_10OhKhT7YukE9bjzTPRiuaCotmZM_pFngJFir9OZNrSr5cWa3cq3LZSUsfjI4FI7PkP91We7Nrw--"
},
"output": {
"CopyPartResult": {
- "ETag": "\"65d16d19e65a7508a51f043180edcc36\"",
- "LastModified": "2016-12-29T21:44:28.000Z"
+ "ETag": "\"b0c6f0e7e054ab8fa2536a2677f8734d\"",
+ "LastModified": "2016-12-29T21:24:43.000Z"
}
},
"comments": {
@@ -1867,9 +1867,9 @@
"output": {
}
},
- "description": "The following example uploads a part of a multipart upload by copying a specified byte range from an existing object as data source.",
- "id": "to-upload-a-part-by-copying-byte-range-from-an-existing-object-as-data-source-1483048068594",
- "title": "To upload a part by copying byte range from an existing object as data source"
+ "description": "The following example uploads a part of a multipart upload by copying data from an existing object as data source.",
+ "id": "to-upload-a-part-by-copying-data-from-an-existing-object-as-data-source-1483046746348",
+ "title": "To upload a part by copying data from an existing object as data source"
}
]
}
diff --git a/service/ivsrealtime/api.go b/service/ivsrealtime/api.go
index b0d59bb36cf..6110f50941e 100644
--- a/service/ivsrealtime/api.go
+++ b/service/ivsrealtime/api.go
@@ -4908,6 +4908,21 @@ type GridConfiguration struct {
// attribute set to "true" (as a string value) in ParticipantTokenConfiguration
// is placed in the featured slot.
FeaturedParticipantAttribute *string `locationName:"featuredParticipantAttribute" type:"string"`
+
+ // Specifies the spacing between participant tiles in pixels. Default: 2.
+ GridGap *int64 `locationName:"gridGap" type:"integer"`
+
+ // Determines whether to omit participants with stopped video in the composition.
+ // Default: false.
+ OmitStoppedVideo *bool `locationName:"omitStoppedVideo" type:"boolean"`
+
+ // Sets the non-featured participant display mode. Default: VIDEO.
+ VideoAspectRatio *string `locationName:"videoAspectRatio" type:"string" enum:"VideoAspectRatio"`
+
+ // Defines how video fits within the participant tile. When not set, videoFillMode
+ // defaults to COVER fill mode for participants in the grid and to CONTAIN fill
+ // mode for featured participants.
+ VideoFillMode *string `locationName:"videoFillMode" type:"string" enum:"VideoFillMode"`
}
// String returns the string representation.
@@ -4934,6 +4949,30 @@ func (s *GridConfiguration) SetFeaturedParticipantAttribute(v string) *GridConfi
return s
}
+// SetGridGap sets the GridGap field's value.
+func (s *GridConfiguration) SetGridGap(v int64) *GridConfiguration {
+ s.GridGap = &v
+ return s
+}
+
+// SetOmitStoppedVideo sets the OmitStoppedVideo field's value.
+func (s *GridConfiguration) SetOmitStoppedVideo(v bool) *GridConfiguration {
+ s.OmitStoppedVideo = &v
+ return s
+}
+
+// SetVideoAspectRatio sets the VideoAspectRatio field's value.
+func (s *GridConfiguration) SetVideoAspectRatio(v string) *GridConfiguration {
+ s.VideoAspectRatio = &v
+ return s
+}
+
+// SetVideoFillMode sets the VideoFillMode field's value.
+func (s *GridConfiguration) SetVideoFillMode(v string) *GridConfiguration {
+ s.VideoFillMode = &v
+ return s
+}
+
type InternalServerException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
@@ -5006,6 +5045,9 @@ type LayoutConfiguration struct {
// Configuration related to grid layout. Default: Grid layout.
Grid *GridConfiguration `locationName:"grid" type:"structure"`
+
+ // Configuration related to PiP layout.
+ Pip *PipConfiguration `locationName:"pip" type:"structure"`
}
// String returns the string representation.
@@ -5026,12 +5068,33 @@ func (s LayoutConfiguration) GoString() string {
return s.String()
}
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *LayoutConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "LayoutConfiguration"}
+ if s.Pip != nil {
+ if err := s.Pip.Validate(); err != nil {
+ invalidParams.AddNested("Pip", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
// SetGrid sets the Grid field's value.
func (s *LayoutConfiguration) SetGrid(v *GridConfiguration) *LayoutConfiguration {
s.Grid = v
return s
}
+// SetPip sets the Pip field's value.
+func (s *LayoutConfiguration) SetPip(v *PipConfiguration) *LayoutConfiguration {
+ s.Pip = v
+ return s
+}
+
type ListCompositionsInput struct {
_ struct{} `type:"structure"`
@@ -6410,6 +6473,146 @@ func (s *PendingVerification) RequestID() string {
return s.RespMetadata.RequestID
}
+// Configuration information specific to Picture-in-Picture (PiP) layout, for
+// server-side composition (https://docs.aws.amazon.com/ivs/latest/RealTimeUserGuide/server-side-composition.html).
+type PipConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // This attribute name identifies the featured slot. A participant with this
+ // attribute set to "true" (as a string value) in ParticipantTokenConfiguration
+ // is placed in the featured slot.
+ FeaturedParticipantAttribute *string `locationName:"featuredParticipantAttribute" type:"string"`
+
+ // Specifies the spacing between participant tiles in pixels. Default: 0.
+ GridGap *int64 `locationName:"gridGap" type:"integer"`
+
+ // Determines whether to omit participants with stopped video in the composition.
+ // Default: false.
+ OmitStoppedVideo *bool `locationName:"omitStoppedVideo" type:"boolean"`
+
+ // Defines PiP behavior when all participants have left. Default: STATIC.
+ PipBehavior *string `locationName:"pipBehavior" type:"string" enum:"PipBehavior"`
+
+ // Specifies the height of the PiP window in pixels. When this is not set explicitly,
+ // pipHeight’s value will be based on the size of the composition and the
+ // aspect ratio of the participant’s video.
+ PipHeight *int64 `locationName:"pipHeight" min:"1" type:"integer"`
+
+ // Sets the PiP window’s offset position in pixels from the closest edges
+ // determined by PipPosition. Default: 0.
+ PipOffset *int64 `locationName:"pipOffset" type:"integer"`
+
+ // Identifies the PiP slot. A participant with this attribute set to "true"
+ // (as a string value) in ParticipantTokenConfiguration is placed in the PiP
+ // slot.
+ PipParticipantAttribute *string `locationName:"pipParticipantAttribute" type:"string"`
+
+ // Determines the corner position of the PiP window. Default: BOTTOM_RIGHT.
+ PipPosition *string `locationName:"pipPosition" type:"string" enum:"PipPosition"`
+
+ // Specifies the width of the PiP window in pixels. When this is not set explicitly,
+ // pipWidth’s value will be based on the size of the composition and the aspect
+ // ratio of the participant’s video.
+ PipWidth *int64 `locationName:"pipWidth" min:"1" type:"integer"`
+
+ // Defines how video fits within the participant tile. Default: COVER.
+ VideoFillMode *string `locationName:"videoFillMode" type:"string" enum:"VideoFillMode"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PipConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PipConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PipConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PipConfiguration"}
+ if s.PipHeight != nil && *s.PipHeight < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("PipHeight", 1))
+ }
+ if s.PipWidth != nil && *s.PipWidth < 1 {
+ invalidParams.Add(request.NewErrParamMinValue("PipWidth", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetFeaturedParticipantAttribute sets the FeaturedParticipantAttribute field's value.
+func (s *PipConfiguration) SetFeaturedParticipantAttribute(v string) *PipConfiguration {
+ s.FeaturedParticipantAttribute = &v
+ return s
+}
+
+// SetGridGap sets the GridGap field's value.
+func (s *PipConfiguration) SetGridGap(v int64) *PipConfiguration {
+ s.GridGap = &v
+ return s
+}
+
+// SetOmitStoppedVideo sets the OmitStoppedVideo field's value.
+func (s *PipConfiguration) SetOmitStoppedVideo(v bool) *PipConfiguration {
+ s.OmitStoppedVideo = &v
+ return s
+}
+
+// SetPipBehavior sets the PipBehavior field's value.
+func (s *PipConfiguration) SetPipBehavior(v string) *PipConfiguration {
+ s.PipBehavior = &v
+ return s
+}
+
+// SetPipHeight sets the PipHeight field's value.
+func (s *PipConfiguration) SetPipHeight(v int64) *PipConfiguration {
+ s.PipHeight = &v
+ return s
+}
+
+// SetPipOffset sets the PipOffset field's value.
+func (s *PipConfiguration) SetPipOffset(v int64) *PipConfiguration {
+ s.PipOffset = &v
+ return s
+}
+
+// SetPipParticipantAttribute sets the PipParticipantAttribute field's value.
+func (s *PipConfiguration) SetPipParticipantAttribute(v string) *PipConfiguration {
+ s.PipParticipantAttribute = &v
+ return s
+}
+
+// SetPipPosition sets the PipPosition field's value.
+func (s *PipConfiguration) SetPipPosition(v string) *PipConfiguration {
+ s.PipPosition = &v
+ return s
+}
+
+// SetPipWidth sets the PipWidth field's value.
+func (s *PipConfiguration) SetPipWidth(v int64) *PipConfiguration {
+ s.PipWidth = &v
+ return s
+}
+
+// SetVideoFillMode sets the VideoFillMode field's value.
+func (s *PipConfiguration) SetVideoFillMode(v string) *PipConfiguration {
+ s.VideoFillMode = &v
+ return s
+}
+
// An object representing a configuration to record a stage stream.
type RecordingConfiguration struct {
_ struct{} `type:"structure"`
@@ -7049,6 +7252,11 @@ func (s *StartCompositionInput) Validate() error {
}
}
}
+ if s.Layout != nil {
+ if err := s.Layout.Validate(); err != nil {
+ invalidParams.AddNested("Layout", err.(request.ErrInvalidParams))
+ }
+ }
if invalidParams.Len() > 0 {
return invalidParams
@@ -7893,6 +8101,46 @@ func ParticipantTokenCapability_Values() []string {
}
}
+const (
+ // PipBehaviorStatic is a PipBehavior enum value
+ PipBehaviorStatic = "STATIC"
+
+ // PipBehaviorDynamic is a PipBehavior enum value
+ PipBehaviorDynamic = "DYNAMIC"
+)
+
+// PipBehavior_Values returns all elements of the PipBehavior enum
+func PipBehavior_Values() []string {
+ return []string{
+ PipBehaviorStatic,
+ PipBehaviorDynamic,
+ }
+}
+
+const (
+ // PipPositionTopLeft is a PipPosition enum value
+ PipPositionTopLeft = "TOP_LEFT"
+
+ // PipPositionTopRight is a PipPosition enum value
+ PipPositionTopRight = "TOP_RIGHT"
+
+ // PipPositionBottomLeft is a PipPosition enum value
+ PipPositionBottomLeft = "BOTTOM_LEFT"
+
+ // PipPositionBottomRight is a PipPosition enum value
+ PipPositionBottomRight = "BOTTOM_RIGHT"
+)
+
+// PipPosition_Values returns all elements of the PipPosition enum
+func PipPosition_Values() []string {
+ return []string{
+ PipPositionTopLeft,
+ PipPositionTopRight,
+ PipPositionBottomLeft,
+ PipPositionBottomRight,
+ }
+}
+
const (
// RecordingConfigurationFormatHls is a RecordingConfigurationFormat enum value
RecordingConfigurationFormatHls = "HLS"
@@ -7904,3 +8152,47 @@ func RecordingConfigurationFormat_Values() []string {
RecordingConfigurationFormatHls,
}
}
+
+const (
+ // VideoAspectRatioAuto is a VideoAspectRatio enum value
+ VideoAspectRatioAuto = "AUTO"
+
+ // VideoAspectRatioVideo is a VideoAspectRatio enum value
+ VideoAspectRatioVideo = "VIDEO"
+
+ // VideoAspectRatioSquare is a VideoAspectRatio enum value
+ VideoAspectRatioSquare = "SQUARE"
+
+ // VideoAspectRatioPortrait is a VideoAspectRatio enum value
+ VideoAspectRatioPortrait = "PORTRAIT"
+)
+
+// VideoAspectRatio_Values returns all elements of the VideoAspectRatio enum
+func VideoAspectRatio_Values() []string {
+ return []string{
+ VideoAspectRatioAuto,
+ VideoAspectRatioVideo,
+ VideoAspectRatioSquare,
+ VideoAspectRatioPortrait,
+ }
+}
+
+const (
+ // VideoFillModeFill is a VideoFillMode enum value
+ VideoFillModeFill = "FILL"
+
+ // VideoFillModeCover is a VideoFillMode enum value
+ VideoFillModeCover = "COVER"
+
+ // VideoFillModeContain is a VideoFillMode enum value
+ VideoFillModeContain = "CONTAIN"
+)
+
+// VideoFillMode_Values returns all elements of the VideoFillMode enum
+func VideoFillMode_Values() []string {
+ return []string{
+ VideoFillModeFill,
+ VideoFillModeCover,
+ VideoFillModeContain,
+ }
+}
diff --git a/service/kinesisanalyticsv2/api.go b/service/kinesisanalyticsv2/api.go
index e5b1b827d0b..c8bd2c37dd4 100644
--- a/service/kinesisanalyticsv2/api.go
+++ b/service/kinesisanalyticsv2/api.go
@@ -565,13 +565,15 @@ func (c *KinesisAnalyticsV2) AddApplicationVpcConfigurationRequest(input *AddApp
// Adds a Virtual Private Cloud (VPC) configuration to the application. Applications
// can use VPCs to store and access resources securely.
//
-// Note the following about VPC configurations for Kinesis Data Analytics applications:
+// Note the following about VPC configurations for Managed Service for Apache
+// Flink applications:
//
// - VPC configurations are not supported for SQL applications.
//
-// - When a VPC is added to a Kinesis Data Analytics application, the application
-// can no longer be accessed from the Internet directly. To enable Internet
-// access to the application, add an Internet gateway to your VPC.
+// - When a VPC is added to a Managed Service for Apache Flink application,
+// the application can no longer be accessed from the Internet directly.
+// To enable Internet access to the application, add an Internet gateway
+// to your VPC.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -664,8 +666,9 @@ func (c *KinesisAnalyticsV2) CreateApplicationRequest(input *CreateApplicationIn
// CreateApplication API operation for Amazon Kinesis Analytics.
//
-// Creates a Kinesis Data Analytics application. For information about creating
-// a Kinesis Data Analytics application, see Creating an Application (https://docs.aws.amazon.com/kinesisanalytics/latest/java/getting-started.html).
+// Creates a Managed Service for Apache Flink application. For information about
+// creating a Managed Service for Apache Flink application, see Creating an
+// Application (https://docs.aws.amazon.com/kinesisanalytics/latest/java/getting-started.html).
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -970,8 +973,8 @@ func (c *KinesisAnalyticsV2) DeleteApplicationRequest(input *DeleteApplicationIn
// DeleteApplication API operation for Amazon Kinesis Analytics.
//
-// Deletes the specified application. Kinesis Data Analytics halts application
-// execution and deletes the application.
+// Deletes the specified application. Managed Service for Apache Flink halts
+// application execution and deletes the application.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -1067,7 +1070,8 @@ func (c *KinesisAnalyticsV2) DeleteApplicationCloudWatchLoggingOptionRequest(inp
// DeleteApplicationCloudWatchLoggingOption API operation for Amazon Kinesis Analytics.
//
-// Deletes an Amazon CloudWatch log stream from an Kinesis Data Analytics application.
+// Deletes an Amazon CloudWatch log stream from an SQL-based Kinesis Data Analytics
+// application.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -1548,7 +1552,7 @@ func (c *KinesisAnalyticsV2) DeleteApplicationVpcConfigurationRequest(input *Del
// DeleteApplicationVpcConfiguration API operation for Amazon Kinesis Analytics.
//
-// Removes a VPC configuration from a Kinesis Data Analytics application.
+// Removes a VPC configuration from a Managed Service for Apache Flink application.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -1641,7 +1645,7 @@ func (c *KinesisAnalyticsV2) DescribeApplicationRequest(input *DescribeApplicati
// DescribeApplication API operation for Amazon Kinesis Analytics.
//
-// Returns information about a specific Kinesis Data Analytics application.
+// Returns information about a specific Managed Service for Apache Flink application.
//
// If you want to retrieve a list of all applications in your account, use the
// ListApplications operation.
@@ -1819,8 +1823,7 @@ func (c *KinesisAnalyticsV2) DescribeApplicationVersionRequest(input *DescribeAp
// To see a list of all the versions of an application, invoke the ListApplicationVersions
// operation.
//
-// This operation is supported only for Amazon Kinesis Data Analytics for Apache
-// Flink.
+// This operation is supported only for Managed Service for Apache Flink.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -2104,8 +2107,7 @@ func (c *KinesisAnalyticsV2) ListApplicationVersionsRequest(input *ListApplicati
// To get the complete description of a specific application version, invoke
// the DescribeApplicationVersion operation.
//
-// This operation is supported only for Amazon Kinesis Data Analytics for Apache
-// Flink.
+// This operation is supported only for Managed Service for Apache Flink.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -2191,9 +2193,9 @@ func (c *KinesisAnalyticsV2) ListApplicationsRequest(input *ListApplicationsInpu
// ListApplications API operation for Amazon Kinesis Analytics.
//
-// Returns a list of Kinesis Data Analytics applications in your account. For
-// each application, the response includes the application name, Amazon Resource
-// Name (ARN), and status.
+// Returns a list of Managed Service for Apache Flink applications in your account.
+// For each application, the response includes the application name, Amazon
+// Resource Name (ARN), and status.
//
// If you want detailed information about a specific application, use DescribeApplication.
//
@@ -2368,10 +2370,11 @@ func (c *KinesisAnalyticsV2) RollbackApplicationRequest(input *RollbackApplicati
// status.
//
// When you rollback an application, it loads state data from the last successful
-// snapshot. If the application has no snapshots, Kinesis Data Analytics rejects
-// the rollback request.
+// snapshot. If the application has no snapshots, Managed Service for Apache
+// Flink rejects the rollback request.
//
-// This action is not supported for Kinesis Data Analytics for SQL applications.
+// This action is not supported for SQL-based Kinesis Data Analytics
+// applications.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -2469,8 +2472,9 @@ func (c *KinesisAnalyticsV2) StartApplicationRequest(input *StartApplicationInpu
// StartApplication API operation for Amazon Kinesis Analytics.
//
-// Starts the specified Kinesis Data Analytics application. After creating an
-// application, you must exclusively call this operation to start your application.
+// Starts the specified Managed Service for Apache Flink application. After
+// creating an application, you must exclusively call this operation to start
+// your application.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -2567,8 +2571,8 @@ func (c *KinesisAnalyticsV2) StopApplicationRequest(input *StopApplicationInput)
//
// You can use the DescribeApplication operation to find the application status.
//
-// Kinesis Data Analytics takes a snapshot when the application is stopped,
-// unless Force is set to true.
+// Managed Service for Apache Flink takes a snapshot when the application is
+// stopped, unless Force is set to true.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -2665,7 +2669,7 @@ func (c *KinesisAnalyticsV2) TagResourceRequest(input *TagResourceInput) (req *r
// TagResource API operation for Amazon Kinesis Analytics.
//
-// Adds one or more key-value tags to a Kinesis Data Analytics application.
+// Adds one or more key-value tags to a Managed Service for Apache Flink application.
// Note that the maximum number of application tags includes system tags. The
// maximum number of user-defined application tags is 50. For more information,
// see Using Tagging (https://docs.aws.amazon.com/kinesisanalytics/latest/java/how-tagging.html).
@@ -2764,8 +2768,8 @@ func (c *KinesisAnalyticsV2) UntagResourceRequest(input *UntagResourceInput) (re
// UntagResource API operation for Amazon Kinesis Analytics.
//
-// Removes one or more tags from a Kinesis Data Analytics application. For more
-// information, see Using Tagging (https://docs.aws.amazon.com/kinesisanalytics/latest/java/how-tagging.html).
+// Removes one or more tags from a Managed Service for Apache Flink application.
+// For more information, see Using Tagging (https://docs.aws.amazon.com/kinesisanalytics/latest/java/how-tagging.html).
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -2860,15 +2864,12 @@ func (c *KinesisAnalyticsV2) UpdateApplicationRequest(input *UpdateApplicationIn
// UpdateApplication API operation for Amazon Kinesis Analytics.
//
-// Updates an existing Kinesis Data Analytics application. Using this operation,
-// you can update application code, input configuration, and output configuration.
-//
-// Kinesis Data Analytics updates the ApplicationVersionId each time you update
-// your application.
+// Updates an existing Managed Service for Apache Flink application. Using this
+// operation, you can update application code, input configuration, and output
+// configuration.
//
-// You cannot update the RuntimeEnvironment of an existing application. If you
-// need to update an application's RuntimeEnvironment, you must delete the application
-// and create it again.
+// Managed Service for Apache Flink updates the ApplicationVersionId each time
+// you update your application.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -2971,7 +2972,8 @@ func (c *KinesisAnalyticsV2) UpdateApplicationMaintenanceConfigurationRequest(in
// UpdateApplicationMaintenanceConfiguration API operation for Amazon Kinesis Analytics.
//
-// Updates the maintenance configuration of the Kinesis Data Analytics application.
+// Updates the maintenance configuration of the Managed Service for Apache Flink
+// application.
//
// You can invoke this operation on an application that is in one of the two
// following states: READY or RUNNING. If you invoke it when the application
@@ -2987,11 +2989,10 @@ func (c *KinesisAnalyticsV2) UpdateApplicationMaintenanceConfigurationRequest(in
// To see the current maintenance configuration of your application, invoke
// the DescribeApplication operation.
//
-// For information about application maintenance, see Kinesis Data Analytics
-// for Apache Flink Maintenance (https://docs.aws.amazon.com/kinesisanalytics/latest/java/maintenance.html).
+// For information about application maintenance, see Managed Service for Apache
+// Flink Maintenance (https://docs.aws.amazon.com/kinesisanalytics/latest/java/maintenance.html).
//
-// This operation is supported only for Amazon Kinesis Data Analytics for Apache
-// Flink.
+// This operation is supported only for Managed Service for Apache Flink.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@@ -3062,8 +3063,8 @@ type AddApplicationCloudWatchLoggingOptionInput struct {
// of CurrentApplicationVersionId.
ConditionalToken *string `min:"1" type:"string"`
- // The version ID of the Kinesis Data Analytics application. You must provide
- // the CurrentApplicationVersionId or the ConditionalToken.You can retrieve
+ // The version ID of the SQL-based Kinesis Data Analytics application. You must
+ // provide the CurrentApplicationVersionId or the ConditionalToken. You can retrieve
// the application version ID using DescribeApplication. For better concurrency
// support, use the ConditionalToken parameter instead of CurrentApplicationVersionId.
CurrentApplicationVersionId *int64 `min:"1" type:"long"`
@@ -3147,13 +3148,13 @@ type AddApplicationCloudWatchLoggingOptionOutput struct {
// The application's ARN.
ApplicationARN *string `min:"1" type:"string"`
- // The new version ID of the Kinesis Data Analytics application. Kinesis Data
- // Analytics updates the ApplicationVersionId each time you change the CloudWatch
- // logging options.
+ // The new version ID of the SQL-based Kinesis Data Analytics application. Kinesis
+ // Data Analytics updates the ApplicationVersionId each time you change the
+ // CloudWatch logging options.
ApplicationVersionId *int64 `min:"1" type:"long"`
- // The descriptions of the current CloudWatch logging options for the Kinesis
- // Data Analytics application.
+ // The descriptions of the current CloudWatch logging options for the SQL-based
+ // Kinesis Data Analytics application.
CloudWatchLoggingOptionDescriptions []*CloudWatchLoggingOptionDescription `type:"list"`
}
@@ -3890,8 +3891,8 @@ type AddApplicationVpcConfigurationOutput struct {
// The ARN of the application.
ApplicationARN *string `min:"1" type:"string"`
- // Provides the current application version. Kinesis Data Analytics updates
- // the ApplicationVersionId each time you update the application.
+ // Provides the current application version. Managed Service for Apache Flink
+ // updates the ApplicationVersionId each time you update the application.
ApplicationVersionId *int64 `min:"1" type:"long"`
// The parameters of the new VPC configuration.
@@ -4039,7 +4040,7 @@ func (s *ApplicationCodeConfigurationDescription) SetCodeContentType(v string) *
}
// Describes code configuration updates for an application. This is supported
-// for a Flink-based Kinesis Data Analytics application or a SQL-based Kinesis
+// for a Managed Service for Apache Flink application or a SQL-based Kinesis
// Data Analytics application.
type ApplicationCodeConfigurationUpdate struct {
_ struct{} `type:"structure"`
@@ -4096,22 +4097,23 @@ func (s *ApplicationCodeConfigurationUpdate) SetCodeContentUpdate(v *CodeContent
return s
}
-// Specifies the creation parameters for a Kinesis Data Analytics application.
+// Specifies the creation parameters for a Managed Service for Apache Flink
+// application.
type ApplicationConfiguration struct {
_ struct{} `type:"structure"`
- // The code location and type parameters for a Flink-based Kinesis Data Analytics
+ // The code location and type parameters for a Managed Service for Apache Flink
// application.
ApplicationCodeConfiguration *ApplicationCodeConfiguration `type:"structure"`
- // Describes whether snapshots are enabled for a Flink-based Kinesis Data Analytics
- // application.
+ // Describes whether snapshots are enabled for a Managed Service for Apache
+ // Flink application.
ApplicationSnapshotConfiguration *ApplicationSnapshotConfiguration `type:"structure"`
- // Describes execution properties for a Flink-based Kinesis Data Analytics application.
+ // Describes execution properties for a Managed Service for Apache Flink application.
EnvironmentProperties *EnvironmentProperties `type:"structure"`
- // The creation and update parameters for a Flink-based Kinesis Data Analytics
+ // The creation and update parameters for a Managed Service for Apache Flink
// application.
FlinkApplicationConfiguration *FlinkApplicationConfiguration `type:"structure"`
@@ -4122,7 +4124,8 @@ type ApplicationConfiguration struct {
// The array of descriptions of VPC configurations available to the application.
VpcConfigurations []*VpcConfiguration `type:"list"`
- // The configuration parameters for a Kinesis Data Analytics Studio notebook.
+ // The configuration parameters for a Managed Service for Apache Flink Studio
+ // notebook.
ZeppelinApplicationConfiguration *ZeppelinApplicationConfiguration `type:"structure"`
}
@@ -4237,25 +4240,26 @@ func (s *ApplicationConfiguration) SetZeppelinApplicationConfiguration(v *Zeppel
}
// Describes details about the application code and starting parameters for
-// a Kinesis Data Analytics application.
+// a Managed Service for Apache Flink application.
type ApplicationConfigurationDescription struct {
_ struct{} `type:"structure"`
- // The details about the application code for a Flink-based Kinesis Data Analytics
+ // The details about the application code for a Managed Service for Apache Flink
// application.
ApplicationCodeConfigurationDescription *ApplicationCodeConfigurationDescription `type:"structure"`
- // Describes whether snapshots are enabled for a Flink-based Kinesis Data Analytics
- // application.
+ // Describes whether snapshots are enabled for a Managed Service for Apache
+ // Flink application.
ApplicationSnapshotConfigurationDescription *ApplicationSnapshotConfigurationDescription `type:"structure"`
- // Describes execution properties for a Flink-based Kinesis Data Analytics application.
+ // Describes execution properties for a Managed Service for Apache Flink application.
EnvironmentPropertyDescriptions *EnvironmentPropertyDescriptions `type:"structure"`
- // The details about a Flink-based Kinesis Data Analytics application.
+ // The details about a Managed Service for Apache Flink application.
FlinkApplicationConfigurationDescription *FlinkApplicationConfigurationDescription `type:"structure"`
- // The details about the starting properties for a Kinesis Data Analytics application.
+ // The details about the starting properties for a Managed Service for Apache
+ // Flink application.
RunConfigurationDescription *RunConfigurationDescription `type:"structure"`
// The details about inputs, outputs, and reference data sources for a SQL-based
@@ -4265,7 +4269,8 @@ type ApplicationConfigurationDescription struct {
// The array of descriptions of VPC configurations available to the application.
VpcConfigurationDescriptions []*VpcConfigurationDescription `type:"list"`
- // The configuration parameters for a Kinesis Data Analytics Studio notebook.
+ // The configuration parameters for a Managed Service for Apache Flink Studio
+ // notebook.
ZeppelinApplicationConfigurationDescription *ZeppelinApplicationConfigurationDescription `type:"structure"`
}
@@ -4342,15 +4347,15 @@ type ApplicationConfigurationUpdate struct {
// Describes updates to an application's code configuration.
ApplicationCodeConfigurationUpdate *ApplicationCodeConfigurationUpdate `type:"structure"`
- // Describes whether snapshots are enabled for a Flink-based Kinesis Data Analytics
- // application.
+ // Describes whether snapshots are enabled for a Managed Service for Apache
+ // Flink application.
ApplicationSnapshotConfigurationUpdate *ApplicationSnapshotConfigurationUpdate `type:"structure"`
- // Describes updates to the environment properties for a Flink-based Kinesis
- // Data Analytics application.
+ // Describes updates to the environment properties for a Managed Service for
+ // Apache Flink application.
EnvironmentPropertyUpdates *EnvironmentPropertyUpdates `type:"structure"`
- // Describes updates to a Flink-based Kinesis Data Analytics application's configuration.
+ // Describes updates to a Managed Service for Apache Flink application's configuration.
FlinkApplicationConfigurationUpdate *FlinkApplicationConfigurationUpdate `type:"structure"`
// Describes updates to a SQL-based Kinesis Data Analytics application's configuration.
@@ -4360,7 +4365,8 @@ type ApplicationConfigurationUpdate struct {
// application.
VpcConfigurationUpdates []*VpcConfigurationUpdate `type:"list"`
- // Updates to the configuration of a Kinesis Data Analytics Studio notebook.
+ // Updates to the configuration of a Managed Service for Apache Flink Studio
+ // notebook.
ZeppelinApplicationConfigurationUpdate *ZeppelinApplicationConfigurationUpdate `type:"structure"`
}
@@ -4485,7 +4491,7 @@ type ApplicationDetail struct {
ApplicationARN *string `min:"1" type:"string" required:"true"`
// Describes details about the application code and starting parameters for
- // a Kinesis Data Analytics application.
+ // a Managed Service for Apache Flink application.
ApplicationConfigurationDescription *ApplicationConfigurationDescription `type:"structure"`
// The description of the application.
@@ -4494,9 +4500,9 @@ type ApplicationDetail struct {
// The details of the maintenance configuration for the application.
ApplicationMaintenanceConfigurationDescription *ApplicationMaintenanceConfigurationDescription `type:"structure"`
- // To create a Kinesis Data Analytics Studio notebook, you must set the mode
- // to INTERACTIVE. However, for a Kinesis Data Analytics for Apache Flink application,
- // the mode is optional.
+ // To create a Managed Service for Apache Flink Studio notebook, you must set
+ // the mode to INTERACTIVE. However, for a Managed Service for Apache Flink
+ // application, the mode is optional.
ApplicationMode *string `type:"string" enum:"ApplicationMode"`
// The name of the application.
@@ -4509,8 +4515,8 @@ type ApplicationDetail struct {
// ApplicationStatus is a required field
ApplicationStatus *string `type:"string" required:"true" enum:"ApplicationStatus"`
- // Provides the current application version. Kinesis Data Analytics updates
- // the ApplicationVersionId each time you update the application.
+ // Provides the current application version. Managed Service for Apache Flink
+ // updates the ApplicationVersionId each time you update the application.
//
// ApplicationVersionId is a required field
ApplicationVersionId *int64 `min:"1" type:"long" required:"true"`
@@ -4824,13 +4830,13 @@ func (s *ApplicationRestoreConfiguration) SetSnapshotName(v string) *Application
return s
}
-// Describes whether snapshots are enabled for a Flink-based Kinesis Data Analytics
-// application.
+// Describes whether snapshots are enabled for a Managed Service for Apache
+// Flink application.
type ApplicationSnapshotConfiguration struct {
_ struct{} `type:"structure"`
- // Describes whether snapshots are enabled for a Flink-based Kinesis Data Analytics
- // application.
+ // Describes whether snapshots are enabled for a Managed Service for Apache
+ // Flink application.
//
// SnapshotsEnabled is a required field
SnapshotsEnabled *bool `type:"boolean" required:"true"`
@@ -4873,13 +4879,13 @@ func (s *ApplicationSnapshotConfiguration) SetSnapshotsEnabled(v bool) *Applicat
return s
}
-// Describes whether snapshots are enabled for a Flink-based Kinesis Data Analytics
-// application.
+// Describes whether snapshots are enabled for a Managed Service for Apache
+// Flink application.
type ApplicationSnapshotConfigurationDescription struct {
_ struct{} `type:"structure"`
- // Describes whether snapshots are enabled for a Flink-based Kinesis Data Analytics
- // application.
+ // Describes whether snapshots are enabled for a Managed Service for Apache
+ // Flink application.
//
// SnapshotsEnabled is a required field
SnapshotsEnabled *bool `type:"boolean" required:"true"`
@@ -4909,8 +4915,8 @@ func (s *ApplicationSnapshotConfigurationDescription) SetSnapshotsEnabled(v bool
return s
}
-// Describes updates to whether snapshots are enabled for a Flink-based Kinesis
-// Data Analytics application.
+// Describes updates to whether snapshots are enabled for a Managed Service
+// for Apache Flink application.
type ApplicationSnapshotConfigurationUpdate struct {
_ struct{} `type:"structure"`
@@ -4967,8 +4973,8 @@ type ApplicationSummary struct {
// ApplicationARN is a required field
ApplicationARN *string `min:"1" type:"string" required:"true"`
- // For a Kinesis Data Analytics for Apache Flink application, the mode is STREAMING.
- // For a Kinesis Data Analytics Studio notebook, it is INTERACTIVE.
+ // For a Managed Service for Apache Flink application, the mode is STREAMING.
+ // For a Managed Service for Apache Flink Studio notebook, it is INTERACTIVE.
ApplicationMode *string `type:"string" enum:"ApplicationMode"`
// The name of the application.
@@ -5055,8 +5061,8 @@ type ApplicationVersionSummary struct {
// ApplicationStatus is a required field
ApplicationStatus *string `type:"string" required:"true" enum:"ApplicationStatus"`
- // The ID of the application version. Kinesis Data Analytics updates the ApplicationVersionId
- // each time you update the application.
+ // The ID of the application version. Managed Service for Apache Flink updates
+ // the ApplicationVersionId each time you update the application.
//
// ApplicationVersionId is a required field
ApplicationVersionId *int64 `min:"1" type:"long" required:"true"`
@@ -5169,14 +5175,14 @@ func (s *CSVMappingParameters) SetRecordRowDelimiter(v string) *CSVMappingParame
}
// The configuration parameters for the default Amazon Glue database. You use
-// this database for SQL queries that you write in a Kinesis Data Analytics
-// Studio notebook.
+// this database for SQL queries that you write in a Managed Service for Apache
+// Flink Studio notebook.
type CatalogConfiguration struct {
_ struct{} `type:"structure"`
// The configuration parameters for the default Amazon Glue database. You use
// this database for Apache Flink SQL queries and table API transforms that
- // you write in a Kinesis Data Analytics Studio notebook.
+ // you write in a Managed Service for Apache Flink Studio notebook.
//
// GlueDataCatalogConfiguration is a required field
GlueDataCatalogConfiguration *GlueDataCatalogConfiguration `type:"structure" required:"true"`
@@ -5226,13 +5232,13 @@ func (s *CatalogConfiguration) SetGlueDataCatalogConfiguration(v *GlueDataCatalo
// The configuration parameters for the default Amazon Glue database. You use
// this database for Apache Flink SQL queries and table API transforms that
-// you write in a Kinesis Data Analytics Studio notebook.
+// you write in a Managed Service for Apache Flink Studio notebook.
type CatalogConfigurationDescription struct {
_ struct{} `type:"structure"`
// The configuration parameters for the default Amazon Glue database. You use
- // this database for SQL queries that you write in a Kinesis Data Analytics
- // Studio notebook.
+ // this database for SQL queries that you write in a Managed Service for Apache
+ // Flink Studio notebook.
//
// GlueDataCatalogConfigurationDescription is a required field
GlueDataCatalogConfigurationDescription *GlueDataCatalogConfigurationDescription `type:"structure" required:"true"`
@@ -5263,14 +5269,14 @@ func (s *CatalogConfigurationDescription) SetGlueDataCatalogConfigurationDescrip
}
// Updates to the configuration parameters for the default Amazon Glue database.
-// You use this database for SQL queries that you write in a Kinesis Data Analytics
-// Studio notebook.
+// You use this database for SQL queries that you write in a Managed Service
+// for Apache Flink Studio notebook.
type CatalogConfigurationUpdate struct {
_ struct{} `type:"structure"`
// Updates to the configuration parameters for the default Amazon Glue database.
- // You use this database for SQL queries that you write in a Kinesis Data Analytics
- // Studio notebook.
+ // You use this database for SQL queries that you write in a Managed Service
+ // for Apache Flink Studio notebook.
//
// GlueDataCatalogConfigurationUpdate is a required field
GlueDataCatalogConfigurationUpdate *GlueDataCatalogConfigurationUpdate `type:"structure" required:"true"`
@@ -5332,17 +5338,18 @@ type CheckpointConfiguration struct {
// another value using this API or in application code.
CheckpointInterval *int64 `min:"1" type:"long"`
- // Describes whether checkpointing is enabled for a Flink-based Kinesis Data
- // Analytics application.
+ // Describes whether checkpointing is enabled for a Managed Service for Apache
+ // Flink application.
//
// If CheckpointConfiguration.ConfigurationType is DEFAULT, the application
// will use a CheckpointingEnabled value of true, even if this value is set
// to another value using this API or in application code.
CheckpointingEnabled *bool `type:"boolean"`
- // Describes whether the application uses Kinesis Data Analytics' default checkpointing
- // behavior. You must set this property to CUSTOM in order to set the CheckpointingEnabled,
- // CheckpointInterval, or MinPauseBetweenCheckpoints parameters.
+ // Describes whether the application uses Managed Service for Apache Flink's
+ // default checkpointing behavior. You must set this property to CUSTOM in order
+ // to set the CheckpointingEnabled, CheckpointInterval, or MinPauseBetweenCheckpoints
+ // parameters.
//
// If this value is set to DEFAULT, the application will use the following values,
// even if they are set to other values using APIs or application code:
@@ -5426,7 +5433,7 @@ func (s *CheckpointConfiguration) SetMinPauseBetweenCheckpoints(v int64) *Checkp
return s
}
-// Describes checkpointing parameters for a Flink-based Kinesis Data Analytics
+// Describes checkpointing parameters for a Managed Service for Apache Flink
// application.
type CheckpointConfigurationDescription struct {
_ struct{} `type:"structure"`
@@ -5438,8 +5445,8 @@ type CheckpointConfigurationDescription struct {
// another value using this API or in application code.
CheckpointInterval *int64 `min:"1" type:"long"`
- // Describes whether checkpointing is enabled for a Flink-based Kinesis Data
- // Analytics application.
+ // Describes whether checkpointing is enabled for a Managed Service for Apache
+ // Flink application.
//
// If CheckpointConfiguration.ConfigurationType is DEFAULT, the application
// will use a CheckpointingEnabled value of true, even if this value is set
@@ -5447,7 +5454,7 @@ type CheckpointConfigurationDescription struct {
CheckpointingEnabled *bool `type:"boolean"`
// Describes whether the application uses the default checkpointing behavior
- // in Kinesis Data Analytics.
+ // in Managed Service for Apache Flink.
//
// If this value is set to DEFAULT, the application will use the following values,
// even if they are set to other values using APIs or application code:
@@ -5510,8 +5517,8 @@ func (s *CheckpointConfigurationDescription) SetMinPauseBetweenCheckpoints(v int
return s
}
-// Describes updates to the checkpointing parameters for a Flink-based Kinesis
-// Data Analytics application.
+// Describes updates to the checkpointing parameters for a Managed Service for
+// Apache Flink application.
type CheckpointConfigurationUpdate struct {
_ struct{} `type:"structure"`
@@ -5530,9 +5537,9 @@ type CheckpointConfigurationUpdate struct {
CheckpointingEnabledUpdate *bool `type:"boolean"`
// Describes updates to whether the application uses the default checkpointing
- // behavior of Kinesis Data Analytics. You must set this property to CUSTOM
- // in order to set the CheckpointingEnabled, CheckpointInterval, or MinPauseBetweenCheckpoints
- // parameters.
+ // behavior of Managed Service for Apache Flink. You must set this property
+ // to CUSTOM in order to set the CheckpointingEnabled, CheckpointInterval, or
+ // MinPauseBetweenCheckpoints parameters.
//
// If this value is set to DEFAULT, the application will use the following values,
// even if they are set to other values using APIs or application code:
@@ -5780,17 +5787,17 @@ func (s *CloudWatchLoggingOptionUpdate) SetLogStreamARNUpdate(v string) *CloudWa
}
// Specifies either the application code, or the location of the application
-// code, for a Flink-based Kinesis Data Analytics application.
+// code, for a Managed Service for Apache Flink application.
type CodeContent struct {
_ struct{} `type:"structure"`
// Information about the Amazon S3 bucket that contains the application code.
S3ContentLocation *S3ContentLocation `type:"structure"`
- // The text-format code for a Flink-based Kinesis Data Analytics application.
+ // The text-format code for a Managed Service for Apache Flink application.
TextContent *string `type:"string"`
- // The zip-format code for a Flink-based Kinesis Data Analytics application.
+ // The zip-format code for a Managed Service for Apache Flink application.
// ZipFileContent is automatically base64 encoded/decoded by the SDK.
ZipFileContent []byte `type:"blob"`
}
@@ -5846,7 +5853,7 @@ func (s *CodeContent) SetZipFileContent(v []byte) *CodeContent {
return s
}
-// Describes details about the code of a Kinesis Data Analytics application.
+// Describes details about the code of a Managed Service for Apache Flink application.
type CodeContentDescription struct {
_ struct{} `type:"structure"`
@@ -6114,8 +6121,9 @@ type CreateApplicationInput struct {
// A summary description of the application.
ApplicationDescription *string `type:"string"`
- // Use the STREAMING mode to create a Kinesis Data Analytics For Flink application.
- // To create a Kinesis Data Analytics Studio notebook, use the INTERACTIVE mode.
+ // Use the STREAMING mode to create a Managed Service for Apache Flink application.
+ // To create a Managed Service for Apache Flink Studio notebook, use the INTERACTIVE
+ // mode.
ApplicationMode *string `type:"string" enum:"ApplicationMode"`
// The name of your application (for example, sample-app).
@@ -6267,8 +6275,8 @@ func (s *CreateApplicationInput) SetTags(v []*Tag) *CreateApplicationInput {
type CreateApplicationOutput struct {
_ struct{} `type:"structure"`
- // In response to your CreateApplication request, Kinesis Data Analytics returns
- // a response with details of the application it created.
+ // In response to your CreateApplication request, Managed Service for Apache
+ // Flink returns a response with details of the application it created.
//
// ApplicationDetail is a required field
ApplicationDetail *ApplicationDetail `type:"structure" required:"true"`
@@ -6507,10 +6515,10 @@ type CustomArtifactConfiguration struct {
// The parameters required to fully specify a Maven reference.
MavenReference *MavenReference `type:"structure"`
- // For a Kinesis Data Analytics application provides a description of an Amazon
- // S3 object, including the Amazon Resource Name (ARN) of the S3 bucket, the
- // name of the Amazon S3 object that contains the data, and the version number
- // of the Amazon S3 object that contains the data.
+ // For a Managed Service for Apache Flink application, provides a description
+ // of an Amazon S3 object, including the Amazon Resource Name (ARN) of the S3
+ // bucket, the name of the Amazon S3 object that contains the data, and the
+ // version number of the Amazon S3 object that contains the data.
S3ContentLocation *S3ContentLocation `type:"structure"`
}
@@ -6584,10 +6592,10 @@ type CustomArtifactConfigurationDescription struct {
// The parameters that are required to specify a Maven dependency.
MavenReferenceDescription *MavenReference `type:"structure"`
- // For a Kinesis Data Analytics application provides a description of an Amazon
- // S3 object, including the Amazon Resource Name (ARN) of the S3 bucket, the
- // name of the Amazon S3 object that contains the data, and the version number
- // of the Amazon S3 object that contains the data.
+ // For a Managed Service for Apache Flink application provides a description
+ // of an Amazon S3 object, including the Amazon Resource Name (ARN) of the S3
+ // bucket, the name of the Amazon S3 object that contains the data, and the
+ // version number of the Amazon S3 object that contains the data.
S3ContentLocationDescription *S3ContentLocation `type:"structure"`
}
@@ -7449,7 +7457,7 @@ func (s *DeleteApplicationVpcConfigurationInput) SetVpcConfigurationId(v string)
type DeleteApplicationVpcConfigurationOutput struct {
_ struct{} `type:"structure"`
- // The ARN of the Kinesis Data Analytics application.
+ // The ARN of the Managed Service for Apache Flink application.
ApplicationARN *string `min:"1" type:"string"`
// The updated version ID of the application.
@@ -7486,8 +7494,8 @@ func (s *DeleteApplicationVpcConfigurationOutput) SetApplicationVersionId(v int6
return s
}
-// The information required to deploy a Kinesis Data Analytics Studio notebook
-// as an application with durable state.
+// The information required to deploy a Managed Service for Apache Flink Studio
+// notebook as an application with durable state.
type DeployAsApplicationConfiguration struct {
_ struct{} `type:"structure"`
@@ -7635,7 +7643,7 @@ type DescribeApplicationInput struct {
// ApplicationName is a required field
ApplicationName *string `min:"1" type:"string" required:"true"`
- // Displays verbose information about a Kinesis Data Analytics application,
+ // Displays verbose information about a Managed Service for Apache Flink application,
// including the application's job plan.
IncludeAdditionalDetails *bool `type:"boolean"`
}
@@ -7973,7 +7981,7 @@ type DiscoverInputSchemaInput struct {
InputProcessingConfiguration *InputProcessingConfiguration `type:"structure"`
// The point at which you want Kinesis Data Analytics to start reading records
- // from the specified streaming source discovery purposes.
+ // from the specified streaming source for discovery purposes.
InputStartingPositionConfiguration *InputStartingPositionConfiguration `type:"structure"`
// The Amazon Resource Name (ARN) of the streaming source.
@@ -8127,7 +8135,7 @@ func (s *DiscoverInputSchemaOutput) SetRawInputRecords(v []*string) *DiscoverInp
return s
}
-// Describes execution properties for a Flink-based Kinesis Data Analytics application.
+// Describes execution properties for a Managed Service for Apache Flink application.
type EnvironmentProperties struct {
_ struct{} `type:"structure"`
@@ -8216,8 +8224,8 @@ func (s *EnvironmentPropertyDescriptions) SetPropertyGroupDescriptions(v []*Prop
return s
}
-// Describes updates to the execution property groups for a Flink-based Kinesis
-// Data Analytics application or a Studio notebook.
+// Describes updates to the execution property groups for a Managed Service
+// for Apache Flink application or a Studio notebook.
type EnvironmentPropertyUpdates struct {
_ struct{} `type:"structure"`
@@ -8274,7 +8282,7 @@ func (s *EnvironmentPropertyUpdates) SetPropertyGroups(v []*PropertyGroup) *Envi
return s
}
-// Describes configuration parameters for a Flink-based Kinesis Data Analytics
+// Describes configuration parameters for a Managed Service for Apache Flink
// application or a Studio notebook.
type FlinkApplicationConfiguration struct {
_ struct{} `type:"structure"`
@@ -8353,7 +8361,7 @@ func (s *FlinkApplicationConfiguration) SetParallelismConfiguration(v *Paralleli
return s
}
-// Describes configuration parameters for a Flink-based Kinesis Data Analytics
+// Describes configuration parameters for a Managed Service for Apache Flink
// application.
type FlinkApplicationConfigurationDescription struct {
_ struct{} `type:"structure"`
@@ -8418,8 +8426,8 @@ func (s *FlinkApplicationConfigurationDescription) SetParallelismConfigurationDe
return s
}
-// Describes updates to the configuration parameters for a Flink-based Kinesis
-// Data Analytics application.
+// Describes updates to the configuration parameters for a Managed Service for
+// Apache Flink application.
type FlinkApplicationConfigurationUpdate struct {
_ struct{} `type:"structure"`
@@ -8492,7 +8500,7 @@ func (s *FlinkApplicationConfigurationUpdate) SetParallelismConfigurationUpdate(
return s
}
-// Describes the starting parameters for a Flink-based Kinesis Data Analytics
+// Describes the starting parameters for a Managed Service for Apache Flink
// application.
type FlinkRunConfiguration struct {
_ struct{} `type:"structure"`
@@ -8621,7 +8629,7 @@ func (s *GlueDataCatalogConfigurationDescription) SetDatabaseARN(v string) *Glue
}
// Updates to the configuration of the Glue Data Catalog that you use for SQL
-// queries that you write in a Kinesis Data Analytics Studio notebook.
+// queries that you write in a Managed Service for Apache Flink Studio notebook.
type GlueDataCatalogConfigurationUpdate struct {
_ struct{} `type:"structure"`
@@ -11748,40 +11756,40 @@ func (s *OutputUpdate) SetOutputId(v string) *OutputUpdate {
return s
}
-// Describes parameters for how a Flink-based Kinesis Data Analytics application
+// Describes parameters for how a Managed Service for Apache Flink application
// executes multiple tasks simultaneously. For more information about parallelism,
// see Parallel Execution (https://ci.apache.org/projects/flink/flink-docs-release-1.8/dev/parallel.html)
// in the Apache Flink Documentation (https://ci.apache.org/projects/flink/flink-docs-release-1.8/).
type ParallelismConfiguration struct {
_ struct{} `type:"structure"`
- // Describes whether the Kinesis Data Analytics service can increase the parallelism
- // of the application in response to increased throughput.
+ // Describes whether the Managed Service for Apache Flink service can increase
+ // the parallelism of the application in response to increased throughput.
AutoScalingEnabled *bool `type:"boolean"`
- // Describes whether the application uses the default parallelism for the Kinesis
- // Data Analytics service. You must set this property to CUSTOM in order to
- // change your application's AutoScalingEnabled, Parallelism, or ParallelismPerKPU
+ // Describes whether the application uses the default parallelism for the Managed
+ // Service for Apache Flink service. You must set this property to CUSTOM in
+ // order to change your application's AutoScalingEnabled, Parallelism, or ParallelismPerKPU
// properties.
//
// ConfigurationType is a required field
ConfigurationType *string `type:"string" required:"true" enum:"ConfigurationType"`
- // Describes the initial number of parallel tasks that a Flink-based Kinesis
- // Data Analytics application can perform. If AutoScalingEnabled is set to True,
- // Kinesis Data Analytics increases the CurrentParallelism value in response
- // to application load. The service can increase the CurrentParallelism value
- // up to the maximum parallelism, which is ParalellismPerKPU times the maximum
- // KPUs for the application. The maximum KPUs for an application is 32 by default,
- // and can be increased by requesting a limit increase. If application load
- // is reduced, the service can reduce the CurrentParallelism value down to the
- // Parallelism setting.
+ // Describes the initial number of parallel tasks that a Managed Service for
+ // Apache Flink application can perform. If AutoScalingEnabled is set to True,
+ // Managed Service for Apache Flink increases the CurrentParallelism value in
+ // response to application load. The service can increase the CurrentParallelism
+	// value up to the maximum parallelism, which is ParallelismPerKPU times the
+ // maximum KPUs for the application. The maximum KPUs for an application is
+ // 32 by default, and can be increased by requesting a limit increase. If application
+ // load is reduced, the service can reduce the CurrentParallelism value down
+ // to the Parallelism setting.
Parallelism *int64 `min:"1" type:"integer"`
- // Describes the number of parallel tasks that a Flink-based Kinesis Data Analytics
- // application can perform per Kinesis Processing Unit (KPU) used by the application.
- // For more information about KPUs, see Amazon Kinesis Data Analytics Pricing
- // (http://aws.amazon.com/kinesis/data-analytics/pricing/).
+ // Describes the number of parallel tasks that a Managed Service for Apache
+ // Flink application can perform per Kinesis Processing Unit (KPU) used by the
+ // application. For more information about KPUs, see Amazon Managed Service
+ // for Apache Flink Pricing (http://aws.amazon.com/kinesis/data-analytics/pricing/).
ParallelismPerKPU *int64 `min:"1" type:"integer"`
}
@@ -11846,22 +11854,22 @@ func (s *ParallelismConfiguration) SetParallelismPerKPU(v int64) *ParallelismCon
return s
}
-// Describes parameters for how a Flink-based Kinesis Data Analytics application
+// Describes parameters for how a Managed Service for Apache Flink application
// executes multiple tasks simultaneously.
type ParallelismConfigurationDescription struct {
_ struct{} `type:"structure"`
- // Describes whether the Kinesis Data Analytics service can increase the parallelism
- // of the application in response to increased throughput.
+ // Describes whether the Managed Service for Apache Flink service can increase
+ // the parallelism of the application in response to increased throughput.
AutoScalingEnabled *bool `type:"boolean"`
- // Describes whether the application uses the default parallelism for the Kinesis
- // Data Analytics service.
+ // Describes whether the application uses the default parallelism for the Managed
+ // Service for Apache Flink service.
ConfigurationType *string `type:"string" enum:"ConfigurationType"`
- // Describes the current number of parallel tasks that a Flink-based Kinesis
- // Data Analytics application can perform. If AutoScalingEnabled is set to True,
- // Kinesis Data Analytics can increase this value in response to application
+ // Describes the current number of parallel tasks that a Managed Service for
+ // Apache Flink application can perform. If AutoScalingEnabled is set to True,
+ // Managed Service for Apache Flink can increase this value in response to application
// load. The service can increase this value up to the maximum parallelism,
// which is ParalellismPerKPU times the maximum KPUs for the application. The
// maximum KPUs for an application is 32 by default, and can be increased by
@@ -11869,10 +11877,10 @@ type ParallelismConfigurationDescription struct {
// can reduce the CurrentParallelism value down to the Parallelism setting.
CurrentParallelism *int64 `min:"1" type:"integer"`
- // Describes the initial number of parallel tasks that a Flink-based Kinesis
- // Data Analytics application can perform. If AutoScalingEnabled is set to True,
- // then Kinesis Data Analytics can increase the CurrentParallelism value in
- // response to application load. The service can increase CurrentParallelism
+ // Describes the initial number of parallel tasks that a Managed Service for
+ // Apache Flink application can perform. If AutoScalingEnabled is set to True,
+ // then Managed Service for Apache Flink can increase the CurrentParallelism
+ // value in response to application load. The service can increase CurrentParallelism
// up to the maximum parallelism, which is ParalellismPerKPU times the maximum
// KPUs for the application. The maximum KPUs for an application is 32 by default,
// and can be increased by requesting a limit increase. If application load
@@ -11880,8 +11888,9 @@ type ParallelismConfigurationDescription struct {
// Parallelism setting.
Parallelism *int64 `min:"1" type:"integer"`
- // Describes the number of parallel tasks that a Flink-based Kinesis Data Analytics
- // application can perform per Kinesis Processing Unit (KPU) used by the application.
+ // Describes the number of parallel tasks that a Managed Service for Apache
+ // Flink application can perform per Kinesis Processing Unit (KPU) used by the
+ // application.
ParallelismPerKPU *int64 `min:"1" type:"integer"`
}
@@ -11938,14 +11947,14 @@ func (s *ParallelismConfigurationDescription) SetParallelismPerKPU(v int64) *Par
type ParallelismConfigurationUpdate struct {
_ struct{} `type:"structure"`
- // Describes updates to whether the Kinesis Data Analytics service can increase
- // the parallelism of a Flink-based Kinesis Data Analytics application in response
- // to increased throughput.
+ // Describes updates to whether the Managed Service for Apache Flink service
+ // can increase the parallelism of a Managed Service for Apache Flink application
+ // in response to increased throughput.
AutoScalingEnabledUpdate *bool `type:"boolean"`
// Describes updates to whether the application uses the default parallelism
- // for the Kinesis Data Analytics service, or if a custom parallelism is used.
- // You must set this property to CUSTOM in order to change your application's
+ // for the Managed Service for Apache Flink service, or if a custom parallelism
+ // is used. You must set this property to CUSTOM in order to change your application's
// AutoScalingEnabled, Parallelism, or ParallelismPerKPU properties.
ConfigurationTypeUpdate *string `type:"string" enum:"ConfigurationType"`
@@ -11954,9 +11963,9 @@ type ParallelismConfigurationUpdate struct {
ParallelismPerKPUUpdate *int64 `min:"1" type:"integer"`
// Describes updates to the initial number of parallel tasks an application
- // can perform. If AutoScalingEnabled is set to True, then Kinesis Data Analytics
- // can increase the CurrentParallelism value in response to application load.
- // The service can increase CurrentParallelism up to the maximum parallelism,
+ // can perform. If AutoScalingEnabled is set to True, then Managed Service for
+ // Apache Flink can increase the CurrentParallelism value in response to application
+ // load. The service can increase CurrentParallelism up to the maximum parallelism,
// which is ParalellismPerKPU times the maximum KPUs for the application. The
// maximum KPUs for an application is 32 by default, and can be increased by
// requesting a limit increase. If application load is reduced, the service
@@ -12252,10 +12261,10 @@ type ReferenceDataSource struct {
// ReferenceSchema is a required field
ReferenceSchema *SourceSchema `type:"structure" required:"true"`
- // Identifies the S3 bucket and object that contains the reference data. A Kinesis
- // Data Analytics application loads reference data only once. If the data changes,
- // you call the UpdateApplication operation to trigger reloading of data into
- // your application.
+ // Identifies the S3 bucket and object that contains the reference data. A SQL-based
+ // Kinesis Data Analytics application loads reference data only once. If the
+ // data changes, you call the UpdateApplication operation to trigger reloading
+ // of data into your application.
S3ReferenceDataSource *S3ReferenceDataSource `type:"structure"`
// The name of the in-application table to create.
@@ -12794,14 +12803,15 @@ func (s *RollbackApplicationOutput) SetApplicationDetail(v *ApplicationDetail) *
return s
}
-// Describes the starting parameters for an Kinesis Data Analytics application.
+// Describes the starting parameters for a Managed Service for Apache Flink
+// application.
type RunConfiguration struct {
_ struct{} `type:"structure"`
// Describes the restore behavior of a restarting application.
ApplicationRestoreConfiguration *ApplicationRestoreConfiguration `type:"structure"`
- // Describes the starting parameters for a Flink-based Kinesis Data Analytics
+ // Describes the starting parameters for a Managed Service for Apache Flink
// application.
FlinkRunConfiguration *FlinkRunConfiguration `type:"structure"`
@@ -12871,14 +12881,15 @@ func (s *RunConfiguration) SetSqlRunConfigurations(v []*SqlRunConfiguration) *Ru
return s
}
-// Describes the starting properties for a Kinesis Data Analytics application.
+// Describes the starting properties for a Managed Service for Apache Flink
+// application.
type RunConfigurationDescription struct {
_ struct{} `type:"structure"`
// Describes the restore behavior of a restarting application.
ApplicationRestoreConfigurationDescription *ApplicationRestoreConfiguration `type:"structure"`
- // Describes the starting parameters for a Flink-based Kinesis Data Analytics
+ // Describes the starting parameters for a Managed Service for Apache Flink
// application.
FlinkRunConfigurationDescription *FlinkRunConfiguration `type:"structure"`
}
@@ -12913,15 +12924,15 @@ func (s *RunConfigurationDescription) SetFlinkRunConfigurationDescription(v *Fli
return s
}
-// Describes the updates to the starting parameters for a Kinesis Data Analytics
-// application.
+// Describes the updates to the starting parameters for a Managed Service for
+// Apache Flink application.
type RunConfigurationUpdate struct {
_ struct{} `type:"structure"`
// Describes updates to the restore behavior of a restarting application.
ApplicationRestoreConfiguration *ApplicationRestoreConfiguration `type:"structure"`
- // Describes the starting parameters for a Flink-based Kinesis Data Analytics
+ // Describes the starting parameters for a Managed Service for Apache Flink
// application.
FlinkRunConfiguration *FlinkRunConfiguration `type:"structure"`
}
@@ -13257,10 +13268,10 @@ func (s *S3ContentBaseLocationUpdate) SetBucketARNUpdate(v string) *S3ContentBas
return s
}
-// For a Kinesis Data Analytics application provides a description of an Amazon
-// S3 object, including the Amazon Resource Name (ARN) of the S3 bucket, the
-// name of the Amazon S3 object that contains the data, and the version number
-// of the Amazon S3 object that contains the data.
+// For a Managed Service for Apache Flink application provides a description
+// of an Amazon S3 object, including the Amazon Resource Name (ARN) of the S3
+// bucket, the name of the Amazon S3 object that contains the data, and the
+// version number of the Amazon S3 object that contains the data.
type S3ContentLocation struct {
_ struct{} `type:"structure"`
@@ -13407,9 +13418,9 @@ func (s *S3ContentLocationUpdate) SetObjectVersionUpdate(v string) *S3ContentLoc
// For a SQL-based Kinesis Data Analytics application, identifies the Amazon
// S3 bucket and object that contains the reference data.
//
-// A Kinesis Data Analytics application loads reference data only once. If the
-// data changes, you call the UpdateApplication operation to trigger reloading
-// of data into your application.
+// A SQL-based Kinesis Data Analytics application loads reference data only
+// once. If the data changes, you call the UpdateApplication operation to trigger
+// reloading of data into your application.
type S3ReferenceDataSource struct {
_ struct{} `type:"structure"`
@@ -13658,6 +13669,9 @@ type SnapshotDetails struct {
// ApplicationVersionId is a required field
ApplicationVersionId *int64 `min:"1" type:"long" required:"true"`
+ // The Flink Runtime for the application snapshot.
+ RuntimeEnvironment *string `type:"string" enum:"RuntimeEnvironment"`
+
// The timestamp of the application snapshot.
SnapshotCreationTimestamp *time.Time `type:"timestamp"`
@@ -13696,6 +13710,12 @@ func (s *SnapshotDetails) SetApplicationVersionId(v int64) *SnapshotDetails {
return s
}
+// SetRuntimeEnvironment sets the RuntimeEnvironment field's value.
+func (s *SnapshotDetails) SetRuntimeEnvironment(v string) *SnapshotDetails {
+ s.RuntimeEnvironment = &v
+ return s
+}
+
// SetSnapshotCreationTimestamp sets the SnapshotCreationTimestamp field's value.
func (s *SnapshotDetails) SetSnapshotCreationTimestamp(v time.Time) *SnapshotDetails {
s.SnapshotCreationTimestamp = &v
@@ -14124,8 +14144,8 @@ type StartApplicationInput struct {
// ApplicationName is a required field
ApplicationName *string `min:"1" type:"string" required:"true"`
- // Identifies the run configuration (start parameters) of a Kinesis Data Analytics
- // application.
+ // Identifies the run configuration (start parameters) of a Managed Service
+ // for Apache Flink application.
RunConfiguration *RunConfiguration `type:"structure"`
}
@@ -14210,15 +14230,15 @@ type StopApplicationInput struct {
// ApplicationName is a required field
ApplicationName *string `min:"1" type:"string" required:"true"`
- // Set to true to force the application to stop. If you set Force to true, Kinesis
- // Data Analytics stops the application without taking a snapshot.
+ // Set to true to force the application to stop. If you set Force to true, Managed
+ // Service for Apache Flink stops the application without taking a snapshot.
//
// Force-stopping your application may lead to data loss or duplication. To
// prevent data loss or duplicate processing of data during application restarts,
// we recommend you to take frequent snapshots of your application.
//
- // You can only force stop a Flink-based Kinesis Data Analytics application.
- // You can't force stop a SQL-based Kinesis Data Analytics application.
+ // You can only force stop a Managed Service for Apache Flink application. You
+ // can't force stop a SQL-based Kinesis Data Analytics application.
//
// The application must be in the STARTING, UPDATING, STOPPING, AUTOSCALING,
// or RUNNING status.
@@ -14661,8 +14681,8 @@ func (s *UnsupportedOperationException) RequestID() string {
type UntagResourceInput struct {
_ struct{} `type:"structure"`
- // The ARN of the Kinesis Data Analytics application from which to remove the
- // tags.
+ // The ARN of the Managed Service for Apache Flink application from which to
+ // remove the tags.
//
// ResourceARN is a required field
ResourceARN *string `min:"1" type:"string" required:"true"`
@@ -14779,6 +14799,16 @@ type UpdateApplicationInput struct {
// Describes updates to the application's starting parameters.
RunConfigurationUpdate *RunConfigurationUpdate `type:"structure"`
+ // Updates the Managed Service for Apache Flink runtime environment used to
+ // run your code. To avoid issues you must:
+ //
+ // * Ensure your new jar and dependencies are compatible with the new runtime
+ // selected.
+ //
+ // * Ensure your new code's state is compatible with the snapshot from which
+ // your application will start
+ RuntimeEnvironmentUpdate *string `type:"string" enum:"RuntimeEnvironment"`
+
// Describes updates to the service execution role.
ServiceExecutionRoleUpdate *string `min:"1" type:"string"`
}
@@ -14882,6 +14912,12 @@ func (s *UpdateApplicationInput) SetRunConfigurationUpdate(v *RunConfigurationUp
return s
}
+// SetRuntimeEnvironmentUpdate sets the RuntimeEnvironmentUpdate field's value.
+func (s *UpdateApplicationInput) SetRuntimeEnvironmentUpdate(v string) *UpdateApplicationInput {
+ s.RuntimeEnvironmentUpdate = &v
+ return s
+}
+
// SetServiceExecutionRoleUpdate sets the ServiceExecutionRoleUpdate field's value.
func (s *UpdateApplicationInput) SetServiceExecutionRoleUpdate(v string) *UpdateApplicationInput {
s.ServiceExecutionRoleUpdate = &v
@@ -15244,22 +15280,23 @@ func (s *VpcConfigurationUpdate) SetVpcConfigurationId(v string) *VpcConfigurati
return s
}
-// The configuration of a Kinesis Data Analytics Studio notebook.
+// The configuration of a Managed Service for Apache Flink Studio notebook.
type ZeppelinApplicationConfiguration struct {
_ struct{} `type:"structure"`
- // The Amazon Glue Data Catalog that you use in queries in a Kinesis Data Analytics
- // Studio notebook.
+ // The Amazon Glue Data Catalog that you use in queries in a Managed Service
+ // for Apache Flink Studio notebook.
CatalogConfiguration *CatalogConfiguration `type:"structure"`
// Custom artifacts are dependency JARs and user-defined functions (UDF).
CustomArtifactsConfiguration []*CustomArtifactConfiguration `type:"list"`
- // The information required to deploy a Kinesis Data Analytics Studio notebook
- // as an application with durable state.
+ // The information required to deploy a Managed Service for Apache Flink Studio
+ // notebook as an application with durable state.
DeployAsApplicationConfiguration *DeployAsApplicationConfiguration `type:"structure"`
- // The monitoring configuration of a Kinesis Data Analytics Studio notebook.
+ // The monitoring configuration of a Managed Service for Apache Flink Studio
+ // notebook.
MonitoringConfiguration *ZeppelinMonitoringConfiguration `type:"structure"`
}
@@ -15340,22 +15377,23 @@ func (s *ZeppelinApplicationConfiguration) SetMonitoringConfiguration(v *Zeppeli
return s
}
-// The configuration of a Kinesis Data Analytics Studio notebook.
+// The configuration of a Managed Service for Apache Flink Studio notebook.
type ZeppelinApplicationConfigurationDescription struct {
_ struct{} `type:"structure"`
- // The Amazon Glue Data Catalog that is associated with the Kinesis Data Analytics
- // Studio notebook.
+ // The Amazon Glue Data Catalog that is associated with the Managed Service
+ // for Apache Flink Studio notebook.
CatalogConfigurationDescription *CatalogConfigurationDescription `type:"structure"`
// Custom artifacts are dependency JARs and user-defined functions (UDF).
CustomArtifactsConfigurationDescription []*CustomArtifactConfigurationDescription `type:"list"`
- // The parameters required to deploy a Kinesis Data Analytics Studio notebook
- // as an application with durable state.
+ // The parameters required to deploy a Managed Service for Apache Flink Studio
+ // notebook as an application with durable state.
DeployAsApplicationConfigurationDescription *DeployAsApplicationConfigurationDescription `type:"structure"`
- // The monitoring configuration of a Kinesis Data Analytics Studio notebook.
+ // The monitoring configuration of a Managed Service for Apache Flink Studio
+ // notebook.
//
// MonitoringConfigurationDescription is a required field
MonitoringConfigurationDescription *ZeppelinMonitoringConfigurationDescription `type:"structure" required:"true"`
@@ -15403,12 +15441,12 @@ func (s *ZeppelinApplicationConfigurationDescription) SetMonitoringConfiguration
return s
}
-// Updates to the configuration of Kinesis Data Analytics Studio notebook.
+// Updates to the configuration of a Managed Service for Apache Flink Studio notebook.
type ZeppelinApplicationConfigurationUpdate struct {
_ struct{} `type:"structure"`
// Updates to the configuration of the Amazon Glue Data Catalog that is associated
- // with the Kinesis Data Analytics Studio notebook.
+ // with the Managed Service for Apache Flink Studio notebook.
CatalogConfigurationUpdate *CatalogConfigurationUpdate `type:"structure"`
// Updates to the customer artifacts. Custom artifacts are dependency JAR files
@@ -15419,8 +15457,8 @@ type ZeppelinApplicationConfigurationUpdate struct {
// Analytics Studio notebook as an application with durable state.
DeployAsApplicationConfigurationUpdate *DeployAsApplicationConfigurationUpdate `type:"structure"`
- // Updates to the monitoring configuration of a Kinesis Data Analytics Studio
- // notebook.
+ // Updates to the monitoring configuration of a Managed Service for Apache Flink
+ // Studio notebook.
MonitoringConfigurationUpdate *ZeppelinMonitoringConfigurationUpdate `type:"structure"`
}
@@ -15501,9 +15539,9 @@ func (s *ZeppelinApplicationConfigurationUpdate) SetMonitoringConfigurationUpdat
return s
}
-// Describes configuration parameters for Amazon CloudWatch logging for a Kinesis
-// Data Analytics Studio notebook. For more information about CloudWatch logging,
-// see Monitoring (https://docs.aws.amazon.com/kinesisanalytics/latest/java/monitoring-overview.html).
+// Describes configuration parameters for Amazon CloudWatch logging for a Managed
+// Service for Apache Flink Studio notebook. For more information about CloudWatch
+// logging, see Monitoring (https://docs.aws.amazon.com/kinesisanalytics/latest/java/monitoring-overview.html).
type ZeppelinMonitoringConfiguration struct {
_ struct{} `type:"structure"`
@@ -15550,8 +15588,8 @@ func (s *ZeppelinMonitoringConfiguration) SetLogLevel(v string) *ZeppelinMonitor
return s
}
-// The monitoring configuration for Apache Zeppelin within a Kinesis Data Analytics
-// Studio notebook.
+// The monitoring configuration for Apache Zeppelin within a Managed Service
+// for Apache Flink Studio notebook.
type ZeppelinMonitoringConfigurationDescription struct {
_ struct{} `type:"structure"`
@@ -15583,13 +15621,13 @@ func (s *ZeppelinMonitoringConfigurationDescription) SetLogLevel(v string) *Zepp
return s
}
-// Updates to the monitoring configuration for Apache Zeppelin within a Kinesis
-// Data Analytics Studio notebook.
+// Updates to the monitoring configuration for Apache Zeppelin within a Managed
+// Service for Apache Flink Studio notebook.
type ZeppelinMonitoringConfigurationUpdate struct {
_ struct{} `type:"structure"`
- // Updates to the logging level for Apache Zeppelin within a Kinesis Data Analytics
- // Studio notebook.
+ // Updates to the logging level for Apache Zeppelin within a Managed Service
+ // for Apache Flink Studio notebook.
//
// LogLevelUpdate is a required field
LogLevelUpdate *string `type:"string" required:"true" enum:"LogLevel"`
diff --git a/service/kinesisanalyticsv2/doc.go b/service/kinesisanalyticsv2/doc.go
index e01ef5e4c68..a611480ee9a 100644
--- a/service/kinesisanalyticsv2/doc.go
+++ b/service/kinesisanalyticsv2/doc.go
@@ -3,11 +3,14 @@
// Package kinesisanalyticsv2 provides the client and types for making API
// requests to Amazon Kinesis Analytics.
//
-// Amazon Kinesis Data Analytics is a fully managed service that you can use
-// to process and analyze streaming data using Java, SQL, or Scala. The service
-// enables you to quickly author and run Java, SQL, or Scala code against streaming
-// sources to perform time series analytics, feed real-time dashboards, and
-// create real-time metrics.
+// Amazon Managed Service for Apache Flink was previously known as Amazon Kinesis
+// Data Analytics for Apache Flink.
+//
+// Amazon Managed Service for Apache Flink is a fully managed service that you
+// can use to process and analyze streaming data using Java, Python, SQL, or
+// Scala. The service enables you to quickly author and run Java, SQL, or Scala
+// code against streaming sources to perform time series analytics, feed real-time
+// dashboards, and create real-time metrics.
//
// See https://docs.aws.amazon.com/goto/WebAPI/kinesisanalyticsv2-2018-05-23 for more information on this service.
//
diff --git a/service/s3/examples_test.go b/service/s3/examples_test.go
index cf293cccece..9a1147c1419 100644
--- a/service/s3/examples_test.go
+++ b/service/s3/examples_test.go
@@ -125,12 +125,16 @@ func ExampleS3_CopyObject_shared00() {
fmt.Println(result)
}
-// To create a bucket
-// The following example creates a bucket.
+// To create a bucket in a specific region
+// The following example creates a bucket. The request specifies an AWS region where
+// to create the bucket.
func ExampleS3_CreateBucket_shared00() {
svc := s3.New(session.New())
input := &s3.CreateBucketInput{
Bucket: aws.String("examplebucket"),
+ CreateBucketConfiguration: &s3.CreateBucketConfiguration{
+ LocationConstraint: aws.String("eu-west-1"),
+ },
}
result, err := svc.CreateBucket(input)
@@ -155,16 +159,12 @@ func ExampleS3_CreateBucket_shared00() {
fmt.Println(result)
}
-// To create a bucket in a specific region
-// The following example creates a bucket. The request specifies an AWS region where
-// to create the bucket.
+// To create a bucket
+// The following example creates a bucket.
func ExampleS3_CreateBucket_shared01() {
svc := s3.New(session.New())
input := &s3.CreateBucketInput{
Bucket: aws.String("examplebucket"),
- CreateBucketConfiguration: &s3.CreateBucketConfiguration{
- LocationConstraint: aws.String("eu-west-1"),
- },
}
result, err := svc.CreateBucket(input)
@@ -452,15 +452,15 @@ func ExampleS3_DeleteObject_shared01() {
fmt.Println(result)
}
-// To remove tag set from an object version
-// The following example removes tag set associated with the specified object version.
-// The request specifies both the object key and object version.
+// To remove tag set from an object
+// The following example removes tag set associated with the specified object. If the
+// bucket is versioning enabled, the operation removes tag set from the latest object
+// version.
func ExampleS3_DeleteObjectTagging_shared00() {
svc := s3.New(session.New())
input := &s3.DeleteObjectTaggingInput{
- Bucket: aws.String("examplebucket"),
- Key: aws.String("HappyFace.jpg"),
- VersionId: aws.String("ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI"),
+ Bucket: aws.String("examplebucket"),
+ Key: aws.String("HappyFace.jpg"),
}
result, err := svc.DeleteObjectTagging(input)
@@ -481,15 +481,15 @@ func ExampleS3_DeleteObjectTagging_shared00() {
fmt.Println(result)
}
-// To remove tag set from an object
-// The following example removes tag set associated with the specified object. If the
-// bucket is versioning enabled, the operation removes tag set from the latest object
-// version.
+// To remove tag set from an object version
+// The following example removes tag set associated with the specified object version.
+// The request specifies both the object key and object version.
func ExampleS3_DeleteObjectTagging_shared01() {
svc := s3.New(session.New())
input := &s3.DeleteObjectTaggingInput{
- Bucket: aws.String("examplebucket"),
- Key: aws.String("HappyFace.jpg"),
+ Bucket: aws.String("examplebucket"),
+ Key: aws.String("HappyFace.jpg"),
+ VersionId: aws.String("ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI"),
}
result, err := svc.DeleteObjectTagging(input)
@@ -510,10 +510,10 @@ func ExampleS3_DeleteObjectTagging_shared01() {
fmt.Println(result)
}
-// To delete multiple object versions from a versioned bucket
-// The following example deletes objects from a bucket. The request specifies object
-// versions. S3 deletes specific object versions and returns the key and versions of
-// deleted objects in the response.
+// To delete multiple objects from a versioned bucket
+// The following example deletes objects from a bucket. The bucket is versioned, and
+// the request does not specify the object version to delete. In this case, all versions
+// remain in the bucket and S3 adds a delete marker.
func ExampleS3_DeleteObjects_shared00() {
svc := s3.New(session.New())
input := &s3.DeleteObjectsInput{
@@ -521,12 +521,10 @@ func ExampleS3_DeleteObjects_shared00() {
Delete: &s3.Delete{
Objects: []*s3.ObjectIdentifier{
{
- Key: aws.String("HappyFace.jpg"),
- VersionId: aws.String("2LWg7lQLnY41.maGB5Z6SWW.dcq0vx7b"),
+ Key: aws.String("objectkey1"),
},
{
- Key: aws.String("HappyFace.jpg"),
- VersionId: aws.String("yoz3HB.ZhCS_tKVEmIOr7qYyyAaZSKVd"),
+ Key: aws.String("objectkey2"),
},
},
Quiet: aws.Bool(false),
@@ -551,10 +549,10 @@ func ExampleS3_DeleteObjects_shared00() {
fmt.Println(result)
}
-// To delete multiple objects from a versioned bucket
-// The following example deletes objects from a bucket. The bucket is versioned, and
-// the request does not specify the object version to delete. In this case, all versions
-// remain in the bucket and S3 adds a delete marker.
+// To delete multiple object versions from a versioned bucket
+// The following example deletes objects from a bucket. The request specifies object
+// versions. S3 deletes specific object versions and returns the key and versions of
+// deleted objects in the response.
func ExampleS3_DeleteObjects_shared01() {
svc := s3.New(session.New())
input := &s3.DeleteObjectsInput{
@@ -562,10 +560,12 @@ func ExampleS3_DeleteObjects_shared01() {
Delete: &s3.Delete{
Objects: []*s3.ObjectIdentifier{
{
- Key: aws.String("objectkey1"),
+ Key: aws.String("HappyFace.jpg"),
+ VersionId: aws.String("2LWg7lQLnY41.maGB5Z6SWW.dcq0vx7b"),
},
{
- Key: aws.String("objectkey2"),
+ Key: aws.String("HappyFace.jpg"),
+ VersionId: aws.String("yoz3HB.ZhCS_tKVEmIOr7qYyyAaZSKVd"),
},
},
Quiet: aws.Bool(false),
@@ -996,13 +996,15 @@ func ExampleS3_GetObjectAcl_shared00() {
fmt.Println(result)
}
-// To retrieve tag set of an object
-// The following example retrieves tag set of an object.
+// To retrieve tag set of a specific object version
+// The following example retrieves tag set of an object. The request specifies object
+// version.
func ExampleS3_GetObjectTagging_shared00() {
svc := s3.New(session.New())
input := &s3.GetObjectTaggingInput{
- Bucket: aws.String("examplebucket"),
- Key: aws.String("HappyFace.jpg"),
+ Bucket: aws.String("examplebucket"),
+ Key: aws.String("exampleobject"),
+ VersionId: aws.String("ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI"),
}
result, err := svc.GetObjectTagging(input)
@@ -1023,15 +1025,13 @@ func ExampleS3_GetObjectTagging_shared00() {
fmt.Println(result)
}
-// To retrieve tag set of a specific object version
-// The following example retrieves tag set of an object. The request specifies object
-// version.
+// To retrieve tag set of an object
+// The following example retrieves tag set of an object.
func ExampleS3_GetObjectTagging_shared01() {
svc := s3.New(session.New())
input := &s3.GetObjectTaggingInput{
- Bucket: aws.String("examplebucket"),
- Key: aws.String("exampleobject"),
- VersionId: aws.String("ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI"),
+ Bucket: aws.String("examplebucket"),
+ Key: aws.String("HappyFace.jpg"),
}
result, err := svc.GetObjectTagging(input)
@@ -1748,18 +1748,15 @@ func ExampleS3_PutBucketWebsite_shared00() {
fmt.Println(result)
}
-// To upload an object and specify server-side encryption and object tags
-// The following example uploads an object. The request specifies the optional server-side
-// encryption option. The request also specifies optional object tags. If the bucket
-// is versioning enabled, S3 returns version ID in response.
+// To create an object.
+// The following example creates an object. If the bucket is versioning enabled, S3
+// returns version ID in response.
func ExampleS3_PutObject_shared00() {
svc := s3.New(session.New())
input := &s3.PutObjectInput{
- Body: aws.ReadSeekCloser(strings.NewReader("filetoupload")),
- Bucket: aws.String("examplebucket"),
- Key: aws.String("exampleobject"),
- ServerSideEncryption: aws.String("AES256"),
- Tagging: aws.String("key1=value1&key2=value2"),
+ Body: aws.ReadSeekCloser(strings.NewReader("filetoupload")),
+ Bucket: aws.String("examplebucket"),
+ Key: aws.String("objectkey"),
}
result, err := svc.PutObject(input)
@@ -1780,15 +1777,19 @@ func ExampleS3_PutObject_shared00() {
fmt.Println(result)
}
-// To create an object.
-// The following example creates an object. If the bucket is versioning enabled, S3
-// returns version ID in response.
+// To upload object and specify user-defined metadata
+// The following example creates an object. The request also specifies optional metadata.
+// If the bucket is versioning enabled, S3 returns version ID in response.
func ExampleS3_PutObject_shared01() {
svc := s3.New(session.New())
input := &s3.PutObjectInput{
Body: aws.ReadSeekCloser(strings.NewReader("filetoupload")),
Bucket: aws.String("examplebucket"),
- Key: aws.String("objectkey"),
+ Key: aws.String("exampleobject"),
+ Metadata: map[string]*string{
+ "metadata1": aws.String("value1"),
+ "metadata2": aws.String("value2"),
+ },
}
result, err := svc.PutObject(input)
@@ -1809,17 +1810,16 @@ func ExampleS3_PutObject_shared01() {
fmt.Println(result)
}
-// To upload an object (specify optional headers)
-// The following example uploads an object. The request specifies optional request headers
-// to directs S3 to use specific storage class and use server-side encryption.
+// To upload an object
+// The following example uploads an object to a versioning-enabled bucket. The source
+// file is specified using Windows file syntax. S3 returns VersionId of the newly created
+// object.
func ExampleS3_PutObject_shared02() {
svc := s3.New(session.New())
input := &s3.PutObjectInput{
- Body: aws.ReadSeekCloser(strings.NewReader("HappyFace.jpg")),
- Bucket: aws.String("examplebucket"),
- Key: aws.String("HappyFace.jpg"),
- ServerSideEncryption: aws.String("AES256"),
- StorageClass: aws.String("STANDARD_IA"),
+ Body: aws.ReadSeekCloser(strings.NewReader("HappyFace.jpg")),
+ Bucket: aws.String("examplebucket"),
+ Key: aws.String("HappyFace.jpg"),
}
result, err := svc.PutObject(input)
@@ -1840,16 +1840,17 @@ func ExampleS3_PutObject_shared02() {
fmt.Println(result)
}
-// To upload an object and specify optional tags
-// The following example uploads an object. The request specifies optional object tags.
-// The bucket is versioned, therefore S3 returns version ID of the newly created object.
+// To upload an object and specify canned ACL.
+// The following example uploads an object. The request specifies optional canned ACL
+// (access control list) to allow READ access to authenticated users. If the bucket is
+// versioning enabled, S3 returns version ID in response.
func ExampleS3_PutObject_shared03() {
svc := s3.New(session.New())
input := &s3.PutObjectInput{
- Body: aws.ReadSeekCloser(strings.NewReader("c:\\HappyFace.jpg")),
- Bucket: aws.String("examplebucket"),
- Key: aws.String("HappyFace.jpg"),
- Tagging: aws.String("key1=value1&key2=value2"),
+ ACL: aws.String("authenticated-read"),
+ Body: aws.ReadSeekCloser(strings.NewReader("filetoupload")),
+ Bucket: aws.String("examplebucket"),
+ Key: aws.String("exampleobject"),
}
result, err := svc.PutObject(input)
@@ -1870,19 +1871,17 @@ func ExampleS3_PutObject_shared03() {
fmt.Println(result)
}
-// To upload object and specify user-defined metadata
-// The following example creates an object. The request also specifies optional metadata.
-// If the bucket is versioning enabled, S3 returns version ID in response.
+// To upload an object (specify optional headers)
+// The following example uploads an object. The request specifies optional request headers
+// to direct S3 to use specific storage class and use server-side encryption.
func ExampleS3_PutObject_shared04() {
svc := s3.New(session.New())
input := &s3.PutObjectInput{
- Body: aws.ReadSeekCloser(strings.NewReader("filetoupload")),
- Bucket: aws.String("examplebucket"),
- Key: aws.String("exampleobject"),
- Metadata: map[string]*string{
- "metadata1": aws.String("value1"),
- "metadata2": aws.String("value2"),
- },
+ Body: aws.ReadSeekCloser(strings.NewReader("HappyFace.jpg")),
+ Bucket: aws.String("examplebucket"),
+ Key: aws.String("HappyFace.jpg"),
+ ServerSideEncryption: aws.String("AES256"),
+ StorageClass: aws.String("STANDARD_IA"),
}
result, err := svc.PutObject(input)
@@ -1903,17 +1902,16 @@ func ExampleS3_PutObject_shared04() {
fmt.Println(result)
}
-// To upload an object and specify canned ACL.
-// The following example uploads and object. The request specifies optional canned ACL
-// (access control list) to all READ access to authenticated users. If the bucket is
-// versioning enabled, S3 returns version ID in response.
+// To upload an object and specify optional tags
+// The following example uploads an object. The request specifies optional object tags.
+// The bucket is versioned, therefore S3 returns version ID of the newly created object.
func ExampleS3_PutObject_shared05() {
svc := s3.New(session.New())
input := &s3.PutObjectInput{
- ACL: aws.String("authenticated-read"),
- Body: aws.ReadSeekCloser(strings.NewReader("filetoupload")),
- Bucket: aws.String("examplebucket"),
- Key: aws.String("exampleobject"),
+ Body: aws.ReadSeekCloser(strings.NewReader("c:\\HappyFace.jpg")),
+ Bucket: aws.String("examplebucket"),
+ Key: aws.String("HappyFace.jpg"),
+ Tagging: aws.String("key1=value1&key2=value2"),
}
result, err := svc.PutObject(input)
@@ -1934,16 +1932,18 @@ func ExampleS3_PutObject_shared05() {
fmt.Println(result)
}
-// To upload an object
-// The following example uploads an object to a versioning-enabled bucket. The source
-// file is specified using Windows file syntax. S3 returns VersionId of the newly created
-// object.
+// To upload an object and specify server-side encryption and object tags
+// The following example uploads an object. The request specifies the optional server-side
+// encryption option. The request also specifies optional object tags. If the bucket
+// is versioning enabled, S3 returns version ID in response.
func ExampleS3_PutObject_shared06() {
svc := s3.New(session.New())
input := &s3.PutObjectInput{
- Body: aws.ReadSeekCloser(strings.NewReader("HappyFace.jpg")),
- Bucket: aws.String("examplebucket"),
- Key: aws.String("HappyFace.jpg"),
+ Body: aws.ReadSeekCloser(strings.NewReader("filetoupload")),
+ Bucket: aws.String("examplebucket"),
+ Key: aws.String("exampleobject"),
+ ServerSideEncryption: aws.String("AES256"),
+ Tagging: aws.String("key1=value1&key2=value2"),
}
result, err := svc.PutObject(input)
@@ -2104,17 +2104,18 @@ func ExampleS3_UploadPart_shared00() {
fmt.Println(result)
}
-// To upload a part by copying data from an existing object as data source
-// The following example uploads a part of a multipart upload by copying data from an
-// existing object as data source.
+// To upload a part by copying byte range from an existing object as data source
+// The following example uploads a part of a multipart upload by copying a specified
+// byte range from an existing object as data source.
func ExampleS3_UploadPartCopy_shared00() {
svc := s3.New(session.New())
input := &s3.UploadPartCopyInput{
- Bucket: aws.String("examplebucket"),
- CopySource: aws.String("/bucketname/sourceobjectkey"),
- Key: aws.String("examplelargeobject"),
- PartNumber: aws.Int64(1),
- UploadId: aws.String("exampleuoh_10OhKhT7YukE9bjzTPRiuaCotmZM_pFngJFir9OZNrSr5cWa3cq3LZSUsfjI4FI7PkP91We7Nrw--"),
+ Bucket: aws.String("examplebucket"),
+ CopySource: aws.String("/bucketname/sourceobjectkey"),
+ CopySourceRange: aws.String("bytes=1-100000"),
+ Key: aws.String("examplelargeobject"),
+ PartNumber: aws.Int64(2),
+ UploadId: aws.String("exampleuoh_10OhKhT7YukE9bjzTPRiuaCotmZM_pFngJFir9OZNrSr5cWa3cq3LZSUsfjI4FI7PkP91We7Nrw--"),
}
result, err := svc.UploadPartCopy(input)
@@ -2135,18 +2136,17 @@ func ExampleS3_UploadPartCopy_shared00() {
fmt.Println(result)
}
-// To upload a part by copying byte range from an existing object as data source
-// The following example uploads a part of a multipart upload by copying a specified
-// byte range from an existing object as data source.
+// To upload a part by copying data from an existing object as data source
+// The following example uploads a part of a multipart upload by copying data from an
+// existing object as data source.
func ExampleS3_UploadPartCopy_shared01() {
svc := s3.New(session.New())
input := &s3.UploadPartCopyInput{
- Bucket: aws.String("examplebucket"),
- CopySource: aws.String("/bucketname/sourceobjectkey"),
- CopySourceRange: aws.String("bytes=1-100000"),
- Key: aws.String("examplelargeobject"),
- PartNumber: aws.Int64(2),
- UploadId: aws.String("exampleuoh_10OhKhT7YukE9bjzTPRiuaCotmZM_pFngJFir9OZNrSr5cWa3cq3LZSUsfjI4FI7PkP91We7Nrw--"),
+ Bucket: aws.String("examplebucket"),
+ CopySource: aws.String("/bucketname/sourceobjectkey"),
+ Key: aws.String("examplelargeobject"),
+ PartNumber: aws.Int64(1),
+ UploadId: aws.String("exampleuoh_10OhKhT7YukE9bjzTPRiuaCotmZM_pFngJFir9OZNrSr5cWa3cq3LZSUsfjI4FI7PkP91We7Nrw--"),
}
result, err := svc.UploadPartCopy(input)