From 3c72a703e0f51aad38a872b7acf6d7ae3c4601c0 Mon Sep 17 00:00:00 2001 From: aws-sdk-python-automation Date: Tue, 24 Aug 2021 18:11:53 +0000 Subject: [PATCH 1/2] Update to latest models --- .../api-change-iotdata-21712.json | 5 + .../api-change-mediaconvert-30507.json | 5 + .../next-release/api-change-polly-20334.json | 5 + .../next-release/api-change-ssm-87345.json | 5 + .../api-change-transcribe-42056.json | 5 + .../iot-data/2015-05-28/paginators-1.json | 9 +- .../data/iot-data/2015-05-28/service-2.json | 159 ++++++++++- .../mediaconvert/2017-08-29/service-2.json | 112 +++++++- botocore/data/polly/2016-06-10/service-2.json | 22 +- botocore/data/ssm/2014-11-06/service-2.json | 5 +- .../data/transcribe/2017-10-26/service-2.json | 263 +++++++++++++++--- 11 files changed, 526 insertions(+), 69 deletions(-) create mode 100644 .changes/next-release/api-change-iotdata-21712.json create mode 100644 .changes/next-release/api-change-mediaconvert-30507.json create mode 100644 .changes/next-release/api-change-polly-20334.json create mode 100644 .changes/next-release/api-change-ssm-87345.json create mode 100644 .changes/next-release/api-change-transcribe-42056.json diff --git a/.changes/next-release/api-change-iotdata-21712.json b/.changes/next-release/api-change-iotdata-21712.json new file mode 100644 index 0000000000..a47beaa1b1 --- /dev/null +++ b/.changes/next-release/api-change-iotdata-21712.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``iot-data``", + "description": "Updated Publish with support for new Retain flag and added two new API operations: GetRetainedMessage, ListRetainedMessages." +} diff --git a/.changes/next-release/api-change-mediaconvert-30507.json b/.changes/next-release/api-change-mediaconvert-30507.json new file mode 100644 index 0000000000..42b4103a1d --- /dev/null +++ b/.changes/next-release/api-change-mediaconvert-30507.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``mediaconvert``", + "description": "AWS Elemental MediaConvert SDK has added MBAFF encoding support for AVC video and the ability to pass encryption context from the job settings to S3." +} diff --git a/.changes/next-release/api-change-polly-20334.json b/.changes/next-release/api-change-polly-20334.json new file mode 100644 index 0000000000..015ca4bd3e --- /dev/null +++ b/.changes/next-release/api-change-polly-20334.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``polly``", + "description": "Amazon Polly adds new New Zealand English voice - Aria. Aria is available as Neural voice only." +} diff --git a/.changes/next-release/api-change-ssm-87345.json b/.changes/next-release/api-change-ssm-87345.json new file mode 100644 index 0000000000..6292de149a --- /dev/null +++ b/.changes/next-release/api-change-ssm-87345.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``ssm``", + "description": "Updated Parameter Store property for logging improvements." +} diff --git a/.changes/next-release/api-change-transcribe-42056.json b/.changes/next-release/api-change-transcribe-42056.json new file mode 100644 index 0000000000..7de6bc0d49 --- /dev/null +++ b/.changes/next-release/api-change-transcribe-42056.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``transcribe``", + "description": "This release adds support for feature tagging with Amazon Transcribe batch jobs." 
+} diff --git a/botocore/data/iot-data/2015-05-28/paginators-1.json b/botocore/data/iot-data/2015-05-28/paginators-1.json index ea142457a6..26d4a561ca 100644 --- a/botocore/data/iot-data/2015-05-28/paginators-1.json +++ b/botocore/data/iot-data/2015-05-28/paginators-1.json @@ -1,3 +1,10 @@ { - "pagination": {} + "pagination": { + "ListRetainedMessages": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "retainedTopics" + } + } } diff --git a/botocore/data/iot-data/2015-05-28/service-2.json b/botocore/data/iot-data/2015-05-28/service-2.json index 394d79bbc1..1227a9f7a2 100644 --- a/botocore/data/iot-data/2015-05-28/service-2.json +++ b/botocore/data/iot-data/2015-05-28/service-2.json @@ -29,7 +29,26 @@ {"shape":"MethodNotAllowedException"}, {"shape":"UnsupportedDocumentEncodingException"} ], - "documentation":"

Deletes the shadow for the specified thing.

For more information, see DeleteThingShadow in the AWS IoT Developer Guide.

" + "documentation":"

Deletes the shadow for the specified thing.

Requires permission to access the DeleteThingShadow action.

For more information, see DeleteThingShadow in the IoT Developer Guide.

" + }, + "GetRetainedMessage":{ + "name":"GetRetainedMessage", + "http":{ + "method":"GET", + "requestUri":"/retainedMessage/{topic}" + }, + "input":{"shape":"GetRetainedMessageRequest"}, + "output":{"shape":"GetRetainedMessageResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"UnauthorizedException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalFailureException"}, + {"shape":"MethodNotAllowedException"} + ], + "documentation":"

Gets the details of a single retained message for the specified topic.

This action returns the message payload of the retained message, which can incur messaging costs. To list only the topic names of the retained messages, call ListRetainedMessages.

Requires permission to access the GetRetainedMessage action.

For more information about messaging costs, see IoT Core pricing - Messaging.

" }, "GetThingShadow":{ "name":"GetThingShadow", @@ -49,7 +68,7 @@ {"shape":"MethodNotAllowedException"}, {"shape":"UnsupportedDocumentEncodingException"} ], - "documentation":"

Gets the shadow for the specified thing.

For more information, see GetThingShadow in the AWS IoT Developer Guide.

" + "documentation":"

Gets the shadow for the specified thing.

Requires permission to access the GetThingShadow action.

For more information, see GetThingShadow in the IoT Developer Guide.

" }, "ListNamedShadowsForThing":{ "name":"ListNamedShadowsForThing", @@ -68,7 +87,25 @@ {"shape":"InternalFailureException"}, {"shape":"MethodNotAllowedException"} ], - "documentation":"

Lists the shadows for the specified thing.

" + "documentation":"

Lists the shadows for the specified thing.

Requires permission to access the ListNamedShadowsForThing action.

" + }, + "ListRetainedMessages":{ + "name":"ListRetainedMessages", + "http":{ + "method":"GET", + "requestUri":"/retainedMessage" + }, + "input":{"shape":"ListRetainedMessagesRequest"}, + "output":{"shape":"ListRetainedMessagesResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"ThrottlingException"}, + {"shape":"UnauthorizedException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InternalFailureException"}, + {"shape":"MethodNotAllowedException"} + ], + "documentation":"

Lists summary information about the retained messages stored for the account.

This action returns only the topic names of the retained messages; it doesn't return any message payloads. Even though no payloads are returned, calling this action can still incur messaging costs.

To get the message payload of a retained message, call GetRetainedMessage with the topic name of the retained message.

Requires permission to access the ListRetainedMessages action.

For more information about messaging costs, see IoT Core pricing - Messaging.
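Assuming this model update is installed, the new retained-message operations can be driven from boto3 roughly as sketched below; the endpoint URL and topic name are placeholders, and the paginator is the ListRetainedMessages entry added to paginators-1.json.

    import boto3

    # Point the iot-data client at your account's ATS data endpoint.
    client = boto3.client(
        'iot-data',
        endpoint_url='https://example-ats.iot.us-east-1.amazonaws.com',  # placeholder
    )

    # List summaries (topic names only, no payloads) via the new paginator.
    paginator = client.get_paginator('list_retained_messages')
    for page in paginator.paginate():
        for summary in page['retainedTopics']:
            print(summary['topic'], summary['payloadSize'], summary['qos'])

    # Fetch one payload explicitly; note that this call can incur messaging costs.
    message = client.get_retained_message(topic='devices/thermostat/status')
    print(message['payload'])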

" }, "Publish":{ "name":"Publish", @@ -83,7 +120,7 @@ {"shape":"UnauthorizedException"}, {"shape":"MethodNotAllowedException"} ], - "documentation":"

Publishes state information.

For more information, see HTTP Protocol in the AWS IoT Developer Guide.

" + "documentation":"

Publishes an MQTT message.

Requires permission to access the Publish action.

For more information about MQTT messages, see MQTT Protocol in the IoT Developer Guide.

For more information about messaging costs, see IoT Core pricing - Messaging.
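A minimal boto3 sketch of the new retain flag (the topic and payload are invented for illustration):

    import boto3

    client = boto3.client('iot-data')  # add endpoint_url=... for an ATS endpoint

    # RETAIN keeps the last message on the topic for future subscribers.
    client.publish(
        topic='devices/thermostat/status',
        qos=1,
        retain=True,
        payload=b'{"temperature": 21.5}',
    )

    # An empty payload with retain=True deletes the stored retained message.
    client.publish(topic='devices/thermostat/status', retain=True, payload=b'')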

" }, "UpdateThingShadow":{ "name":"UpdateThingShadow", @@ -104,7 +141,7 @@ {"shape":"MethodNotAllowedException"}, {"shape":"UnsupportedDocumentEncodingException"} ], - "documentation":"

Updates the shadow for the specified thing.

For more information, see UpdateThingShadow in the AWS IoT Developer Guide.

" + "documentation":"

Updates the shadow for the specified thing.

Requires permission to access the UpdateThingShadow action.

For more information, see UpdateThingShadow in the IoT Developer Guide.
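For context, a hedged boto3 sketch of a shadow update (the thing name and state are placeholders):

    import json
    import boto3

    client = boto3.client('iot-data')

    # Merge a desired state into the classic (unnamed) shadow.
    response = client.update_thing_shadow(
        thingName='my-thermostat',
        payload=json.dumps({'state': {'desired': {'setpoint': 22}}}).encode(),
    )
    # The response payload is a streaming blob containing the updated document.
    print(json.loads(response['payload'].read()))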

" } }, "shapes":{ @@ -151,6 +188,41 @@ "documentation":"

The output from the DeleteThingShadow operation.

", "payload":"payload" }, + "GetRetainedMessageRequest":{ + "type":"structure", + "required":["topic"], + "members":{ + "topic":{ + "shape":"Topic", + "documentation":"

The topic name of the retained message to retrieve.

", + "location":"uri", + "locationName":"topic" + } + }, + "documentation":"

The input for the GetRetainedMessage operation.

" + }, + "GetRetainedMessageResponse":{ + "type":"structure", + "members":{ + "topic":{ + "shape":"Topic", + "documentation":"

The topic name to which the retained message was published.

" + }, + "payload":{ + "shape":"Payload", + "documentation":"

The Base64-encoded message payload of the retained message body.

" + }, + "qos":{ + "shape":"Qos", + "documentation":"

The quality of service (QoS) level used to publish the retained message.

" + }, + "lastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

The Epoch date and time, in milliseconds, when the retained message was stored by IoT.

" + } + }, + "documentation":"

The output from the GetRetainedMessage operation.

" + }, "GetThingShadowRequest":{ "type":"structure", "required":["thingName"], @@ -240,14 +312,49 @@ }, "nextToken":{ "shape":"NextToken", - "documentation":"

The token for the next set of results, or null if there are no additional results.

" + "documentation":"

The token to use to get the next set of results, or null if there are no additional results.

" }, "timestamp":{ "shape":"Timestamp", - "documentation":"

The Epoch date and time the response was generated by AWS IoT.

" + "documentation":"

The Epoch date and time the response was generated by IoT.

" + } + } + }, + "ListRetainedMessagesRequest":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

To retrieve the next set of results, pass the nextToken value from the previous response; otherwise, pass null to receive the first set of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to return at one time.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListRetainedMessagesResponse":{ + "type":"structure", + "members":{ + "retainedTopics":{ + "shape":"RetainedMessageList", + "documentation":"

A summary list of the account's retained messages. The information returned doesn't include the message payloads of the retained messages.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results, or null if there are no additional results.

" } } }, + "MaxResults":{ + "type":"integer", + "max":200, + "min":1 + }, "MethodNotAllowedException":{ "type":"structure", "members":{ @@ -271,6 +378,7 @@ "min":1 }, "Payload":{"type":"blob"}, + "PayloadSize":{"type":"long"}, "PublishRequest":{ "type":"structure", "required":["topic"], @@ -287,9 +395,15 @@ "location":"querystring", "locationName":"qos" }, + "retain":{ + "shape":"Retain", + "documentation":"

A Boolean value that determines whether to set the RETAIN flag when the message is published.

Setting the RETAIN flag causes the message to be retained and sent to new subscribers to the topic.

Valid values: true | false

Default value: false

", + "location":"querystring", + "locationName":"retain" + }, "payload":{ "shape":"Payload", - "documentation":"

The state information, in JSON format.

" + "documentation":"

The message body. MQTT accepts text, binary, and empty (null) message payloads.

Publishing an empty (null) payload with retain = true deletes the retained message identified by topic from IoT Core.

" } }, "documentation":"

The input for the Publish operation.

", @@ -324,6 +438,33 @@ "error":{"httpStatusCode":404}, "exception":true }, + "Retain":{"type":"boolean"}, + "RetainedMessageList":{ + "type":"list", + "member":{"shape":"RetainedMessageSummary"} + }, + "RetainedMessageSummary":{ + "type":"structure", + "members":{ + "topic":{ + "shape":"Topic", + "documentation":"

The topic name to which the retained message was published.

" + }, + "payloadSize":{ + "shape":"PayloadSize", + "documentation":"

The size of the retained message's payload in bytes.

" + }, + "qos":{ + "shape":"Qos", + "documentation":"

The quality of service (QoS) level used to publish the retained message.

" + }, + "lastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

The Epoch date and time, in milliseconds, when the retained message was stored by IoT.

" + } + }, + "documentation":"

Information about a single retained message.

" + }, "ServiceUnavailableException":{ "type":"structure", "members":{ @@ -427,5 +568,5 @@ }, "errorMessage":{"type":"string"} }, - "documentation":"AWS IoT

AWS IoT-Data enables secure, bi-directional communication between Internet-connected things (such as sensors, actuators, embedded devices, or smart appliances) and the AWS cloud. It implements a broker for applications and things to publish messages over HTTP (Publish) and retrieve, update, and delete shadows. A shadow is a persistent representation of your things and their state in the AWS cloud.

Find the endpoint address for actions in the AWS IoT data plane by running this CLI command:

aws iot describe-endpoint --endpoint-type iot:Data-ATS

The service name used by AWS Signature Version 4 to sign requests is: iotdevicegateway.

" + "documentation":"IoT data

IoT data enables secure, bi-directional communication between Internet-connected things (such as sensors, actuators, embedded devices, or smart appliances) and the Amazon Web Services cloud. It implements a broker for applications and things to publish messages over HTTP (Publish) and retrieve, update, and delete shadows. A shadow is a persistent representation of your things and their state in the Amazon Web Services cloud.

Find the endpoint address for actions in IoT data by running this CLI command:

aws iot describe-endpoint --endpoint-type iot:Data-ATS
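The same lookup from boto3, assuming the standard iot control-plane client, looks roughly like:

    import boto3

    iot = boto3.client('iot')
    endpoint = iot.describe_endpoint(endpointType='iot:Data-ATS')['endpointAddress']
    data_client = boto3.client('iot-data', endpoint_url=f'https://{endpoint}')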

The service name used by Amazon Web Services Signature Version 4 to sign requests is: iotdevicegateway.

" } diff --git a/botocore/data/mediaconvert/2017-08-29/service-2.json b/botocore/data/mediaconvert/2017-08-29/service-2.json index d437e8d0b7..8fc0481e9c 100644 --- a/botocore/data/mediaconvert/2017-08-29/service-2.json +++ b/botocore/data/mediaconvert/2017-08-29/service-2.json @@ -2621,7 +2621,7 @@ "FragmentLength": { "shape": "__integerMin1Max2147483647", "locationName": "fragmentLength", - "documentation": "Length of fragments to generate (in seconds). Fragment length must be compatible with GOP size and Framerate. Note that fragments will end on the next keyframe after this number of seconds, so actual fragment length may be longer. When Emit Single File is checked, the fragmentation is internal to a single output file and it does not cause the creation of many output files as in other output types." + "documentation": "Specify the length, in whole seconds, of the mp4 fragments. When you don't specify a value, MediaConvert defaults to 2. Related setting: Use Fragment length control (FragmentLengthControl) to specify whether the encoder enforces this value strictly." }, "ImageBasedTrickPlay": { "shape": "CmafImageBasedTrickPlay", @@ -2666,7 +2666,12 @@ "SegmentLength": { "shape": "__integerMin1Max2147483647", "locationName": "segmentLength", - "documentation": "Use this setting to specify the length, in seconds, of each individual CMAF segment. This value applies to the whole package; that is, to every output in the output group. Note that segments end on the first keyframe after this number of seconds, so the actual segment length might be slightly longer. If you set Segment control (CmafSegmentControl) to single file, the service puts the content of each output in a single file that has metadata that marks these segments. If you set it to segmented files, the service creates multiple files for each output, each with the content of one segment." + "documentation": "Specify the length, in whole seconds, of each segment. When you don't specify a value, MediaConvert defaults to 10. Related settings: Use Segment length control (SegmentLengthControl) to specify whether the encoder enforces this value strictly. Use Segment control (CmafSegmentControl) to specify whether MediaConvert creates separate segment files or one content file that has metadata to mark the segment boundaries." + }, + "SegmentLengthControl": { + "shape": "CmafSegmentLengthControl", + "locationName": "segmentLengthControl", + "documentation": "Specify how you want MediaConvert to determine the segment length. Choose Exact (EXACT) to have the encoder use the exact length that you specify with the setting Segment length (SegmentLength). This might result in extra I-frames. Choose Multiple of GOP (GOP_MULTIPLE) to have the encoder round up the segment lengths to match the next GOP boundary." }, "StreamInfResolution": { "shape": "CmafStreamInfResolution", @@ -2761,6 +2766,14 @@ "SEGMENTED_FILES" ] }, + "CmafSegmentLengthControl": { + "type": "string", + "documentation": "Specify how you want MediaConvert to determine the segment length. Choose Exact (EXACT) to have the encoder use the exact length that you specify with the setting Segment length (SegmentLength). This might result in extra I-frames. 
Choose Multiple of GOP (GOP_MULTIPLE) to have the encoder round up the segment lengths to match the next GOP boundary.", + "enum": [ + "EXACT", + "GOP_MULTIPLE" + ] + }, "CmafStreamInfResolution": { "type": "string", "documentation": "Include or exclude RESOLUTION attribute for video in EXT-X-STREAM-INF tag of variant manifest.", @@ -3434,7 +3447,12 @@ "SegmentLength": { "shape": "__integerMin1Max2147483647", "locationName": "segmentLength", - "documentation": "Length of mpd segments to create (in seconds). Note that segments will end on the next keyframe after this number of seconds, so actual segment length may be longer. When Emit Single File is checked, the segmentation is internal to a single output file and it does not cause the creation of many output files as in other output types." + "documentation": "Specify the length, in whole seconds, of each segment. When you don't specify a value, MediaConvert defaults to 30. Related settings: Use Segment length control (SegmentLengthControl) to specify whether the encoder enforces this value strictly. Use Segment control (DashIsoSegmentControl) to specify whether MediaConvert creates separate segment files or one content file that has metadata to mark the segment boundaries." + }, + "SegmentLengthControl": { + "shape": "DashIsoSegmentLengthControl", + "locationName": "segmentLengthControl", + "documentation": "Specify how you want MediaConvert to determine the segment length. Choose Exact (EXACT) to have the encoder use the exact length that you specify with the setting Segment length (SegmentLength). This might result in extra I-frames. Choose Multiple of GOP (GOP_MULTIPLE) to have the encoder round up the segment lengths to match the next GOP boundary." }, "WriteSegmentTimelineInRepresentation": { "shape": "DashIsoWriteSegmentTimelineInRepresentation", @@ -3493,6 +3511,14 @@ "SEGMENTED_FILES" ] }, + "DashIsoSegmentLengthControl": { + "type": "string", + "documentation": "Specify how you want MediaConvert to determine the segment length. Choose Exact (EXACT) to have the encoder use the exact length that you specify with the setting Segment length (SegmentLength). This might result in extra I-frames. Choose Multiple of GOP (GOP_MULTIPLE) to have the encoder round up the segment lengths to match the next GOP boundary.", + "enum": [ + "EXACT", + "GOP_MULTIPLE" + ] + }, "DashIsoWriteSegmentTimelineInRepresentation": { "type": "string", "documentation": "When you enable Precise segment duration in manifests (writeSegmentTimelineInRepresentation), your DASH manifest shows precise segment durations. The segment duration information appears inside the SegmentTimeline element, inside SegmentTemplate at the Representation level. When this feature isn't enabled, the segment durations in your DASH manifest are approximate. The segment duration information appears in the duration attribute of the SegmentTemplate element.", @@ -4857,10 +4883,11 @@ }, "H264FieldEncoding": { "type": "string", - "documentation": "Keep the default value, PAFF, to have MediaConvert use PAFF encoding for interlaced outputs. Choose Force field (FORCE_FIELD) to disable PAFF encoding and create separate interlaced fields.", + "documentation": "The video encoding method for your MPEG-4 AVC output. Keep the default value, PAFF, to have MediaConvert use PAFF encoding for interlaced outputs. Choose Force field (FORCE_FIELD) to disable PAFF encoding and create separate interlaced fields. 
Choose MBAFF to disable PAFF and have MediaConvert use MBAFF encoding for interlaced outputs.", "enum": [ "PAFF", - "FORCE_FIELD" + "FORCE_FIELD", + "MBAFF" ] }, "H264FlickerAdaptiveQuantization": { @@ -5023,7 +5050,7 @@ "FieldEncoding": { "shape": "H264FieldEncoding", "locationName": "fieldEncoding", - "documentation": "Keep the default value, PAFF, to have MediaConvert use PAFF encoding for interlaced outputs. Choose Force field (FORCE_FIELD) to disable PAFF encoding and create separate interlaced fields." + "documentation": "The video encoding method for your MPEG-4 AVC output. Keep the default value, PAFF, to have MediaConvert use PAFF encoding for interlaced outputs. Choose Force field (FORCE_FIELD) to disable PAFF encoding and create separate interlaced fields. Choose MBAFF to disable PAFF and have MediaConvert use MBAFF encoding for interlaced outputs." }, "FlickerAdaptiveQuantization": { "shape": "H264FlickerAdaptiveQuantization", @@ -5098,7 +5125,7 @@ "NumberBFramesBetweenReferenceFrames": { "shape": "__integerMin0Max7", "locationName": "numberBFramesBetweenReferenceFrames", - "documentation": "Number of B-frames between reference frames." + "documentation": "Specify the number of B-frames that MediaConvert puts between reference frames in this output. Valid values are whole numbers from 0 through 7. When you don't specify a value, MediaConvert defaults to 2." }, "NumberReferenceFrames": { "shape": "__integerMin1Max6", @@ -5535,7 +5562,7 @@ "NumberBFramesBetweenReferenceFrames": { "shape": "__integerMin0Max7", "locationName": "numberBFramesBetweenReferenceFrames", - "documentation": "Number of B-frames between reference frames." + "documentation": "Specify the number of B-frames that MediaConvert puts between reference frames in this output. Valid values are whole numbers from 0 through 7. When you don't specify a value, MediaConvert defaults to 2." }, "NumberReferenceFrames": { "shape": "__integerMin1Max6", @@ -6058,7 +6085,12 @@ "SegmentLength": { "shape": "__integerMin1Max2147483647", "locationName": "segmentLength", - "documentation": "Length of MPEG-2 Transport Stream segments to create (in seconds). Note that segments will end on the next keyframe after this number of seconds, so actual segment length may be longer." + "documentation": "Specify the length, in whole seconds, of each segment. When you don't specify a value, MediaConvert defaults to 10. Related settings: Use Segment length control (SegmentLengthControl) to specify whether the encoder enforces this value strictly. Use Segment control (HlsSegmentControl) to specify whether MediaConvert creates separate segment files or one content file that has metadata to mark the segment boundaries." + }, + "SegmentLengthControl": { + "shape": "HlsSegmentLengthControl", + "locationName": "segmentLengthControl", + "documentation": "Specify how you want MediaConvert to determine the segment length. Choose Exact (EXACT) to have the encoder use the exact length that you specify with the setting Segment length (SegmentLength). This might result in extra I-frames. Choose Multiple of GOP (GOP_MULTIPLE) to have the encoder round up the segment lengths to match the next GOP boundary." }, "SegmentsPerSubdirectory": { "shape": "__integerMin1Max2147483647", @@ -6195,6 +6227,14 @@ "SEGMENTED_FILES" ] }, + "HlsSegmentLengthControl": { + "type": "string", + "documentation": "Specify how you want MediaConvert to determine the segment length. 
Choose Exact (EXACT) to have the encoder use the exact length that you specify with the setting Segment length (SegmentLength). This might result in extra I-frames. Choose Multiple of GOP (GOP_MULTIPLE) to have the encoder round up the segment lengths to match the next GOP boundary.", + "enum": [ + "EXACT", + "GOP_MULTIPLE" + ] + }, "HlsSettings": { "type": "structure", "members": { @@ -7609,6 +7649,14 @@ "NONE" ] }, + "M2tsDataPtsControl": { + "type": "string", + "documentation": "If you select ALIGN_TO_VIDEO, MediaConvert writes captions and data packets with Presentation Timestamp (PTS) values greater than or equal to the first video packet PTS (MediaConvert drops captions and data packets with lesser PTS values). Keep the default value (AUTO) to allow all PTS values.", + "enum": [ + "AUTO", + "ALIGN_TO_VIDEO" + ] + }, "M2tsEbpAudioInterval": { "type": "string", "documentation": "When set to VIDEO_AND_FIXED_INTERVALS, audio EBP markers will be added to partitions 3 and 4. The interval between these additional markers will be fixed, and will be slightly shorter than the video EBP marker interval. When set to VIDEO_INTERVAL, these additional markers will not be inserted. Only applicable when EBP segmentation markers are is selected (segmentationMarkers is EBP or EBP_LEGACY).", @@ -7737,6 +7785,11 @@ "locationName": "bufferModel", "documentation": "Controls what buffer model to use for accurate interleaving. If set to MULTIPLEX, use multiplex buffer model. If set to NONE, this can lead to lower latency, but low-memory devices may not be able to play back the stream without interruptions." }, + "DataPTSControl": { + "shape": "M2tsDataPtsControl", + "locationName": "dataPTSControl", + "documentation": "If you select ALIGN_TO_VIDEO, MediaConvert writes captions and data packets with Presentation Timestamp (PTS) values greater than or equal to the first video packet PTS (MediaConvert drops captions and data packets with lesser PTS values). Keep the default value (AUTO) to allow all PTS values." + }, "DvbNitSettings": { "shape": "DvbNitSettings", "locationName": "dvbNitSettings", @@ -7903,6 +7956,14 @@ "MATCH_VIDEO_DURATION" ] }, + "M3u8DataPtsControl": { + "type": "string", + "documentation": "If you select ALIGN_TO_VIDEO, MediaConvert writes captions and data packets with Presentation Timestamp (PTS) values greater than or equal to the first video packet PTS (MediaConvert drops captions and data packets with lesser PTS values). Keep the default value (AUTO) to allow all PTS values.", + "enum": [ + "AUTO", + "ALIGN_TO_VIDEO" + ] + }, "M3u8NielsenId3": { "type": "string", "documentation": "If INSERT, Nielsen inaudible tones for media tracking will be detected in the input audio and an equivalent ID3 tag will be inserted in the output.", @@ -7945,6 +8006,11 @@ "locationName": "audioPids", "documentation": "Packet Identifier (PID) of the elementary audio stream(s) in the transport stream. Multiple values are accepted, and can be entered in ranges and/or by comma separation." }, + "DataPTSControl": { + "shape": "M3u8DataPtsControl", + "locationName": "dataPTSControl", + "documentation": "If you select ALIGN_TO_VIDEO, MediaConvert writes captions and data packets with Presentation Timestamp (PTS) values greater than or equal to the first video packet PTS (MediaConvert drops captions and data packets with lesser PTS values). Keep the default value (AUTO) to allow all PTS values." 
+ }, "MaxPcrInterval": { "shape": "__integerMin0Max500", "locationName": "maxPcrInterval", @@ -8425,7 +8491,7 @@ }, "Mpeg2GopSizeUnits": { "type": "string", - "documentation": "Indicates if the GOP Size in MPEG2 is specified in frames or seconds. If seconds the system will convert the GOP Size into a frame count at run time.", + "documentation": "Specify the units for GOP size (GopSize). If you don't specify a value here, by default the encoder measures GOP size in frames.", "enum": [ "FRAMES", "SECONDS" @@ -8549,12 +8615,12 @@ "GopSize": { "shape": "__doubleMin0", "locationName": "gopSize", - "documentation": "GOP Length (keyframe interval) in frames or seconds. Must be greater than zero." + "documentation": "Specify the interval between keyframes, in seconds or frames, for this output. Default: 12 Related settings: When you specify the GOP size in seconds, set GOP mode control (GopSizeUnits) to Specified, seconds (SECONDS). The default value for GOP mode control (GopSizeUnits) is Frames (FRAMES)." }, "GopSizeUnits": { "shape": "Mpeg2GopSizeUnits", "locationName": "gopSizeUnits", - "documentation": "Indicates if the GOP Size in MPEG2 is specified in frames or seconds. If seconds the system will convert the GOP Size into a frame count at run time." + "documentation": "Specify the units for GOP size (GopSize). If you don't specify a value here, by default the encoder measures GOP size in frames." }, "HrdBufferInitialFillPercentage": { "shape": "__integerMin0Max100", @@ -8589,7 +8655,7 @@ "NumberBFramesBetweenReferenceFrames": { "shape": "__integerMin0Max7", "locationName": "numberBFramesBetweenReferenceFrames", - "documentation": "Number of B-frames between reference frames." + "documentation": "Specify the number of B-frames that MediaConvert puts between reference frames in this output. Valid values are whole numbers from 0 through 7. When you don't specify a value, MediaConvert defaults to 2." }, "ParControl": { "shape": "Mpeg2ParControl", @@ -8735,6 +8801,14 @@ }, "documentation": "If you are using DRM, set DRM System (MsSmoothEncryptionSettings) to specify the value SpekeKeyProvider." }, + "MsSmoothFragmentLengthControl": { + "type": "string", + "documentation": "Specify how you want MediaConvert to determine the fragment length. Choose Exact (EXACT) to have the encoder use the exact length that you specify with the setting Fragment length (FragmentLength). This might result in extra I-frames. Choose Multiple of GOP (GOP_MULTIPLE) to have the encoder round up the segment lengths to match the next GOP boundary.", + "enum": [ + "EXACT", + "GOP_MULTIPLE" + ] + }, "MsSmoothGroupSettings": { "type": "structure", "members": { @@ -8766,7 +8840,12 @@ "FragmentLength": { "shape": "__integerMin1Max2147483647", "locationName": "fragmentLength", - "documentation": "Use Fragment length (FragmentLength) to specify the mp4 fragment sizes in seconds. Fragment length must be compatible with GOP size and frame rate." + "documentation": "Specify how you want MediaConvert to determine the fragment length. Choose Exact (EXACT) to have the encoder use the exact length that you specify with the setting Fragment length (FragmentLength). This might result in extra I-frames. Choose Multiple of GOP (GOP_MULTIPLE) to have the encoder round up the segment lengths to match the next GOP boundary." + }, + "FragmentLengthControl": { + "shape": "MsSmoothFragmentLengthControl", + "locationName": "fragmentLengthControl", + "documentation": "Specify how you want MediaConvert to determine the fragment length. 
Choose Exact (EXACT) to have the encoder use the exact length that you specify with the setting Fragment length (FragmentLength). This might result in extra I-frames. Choose Multiple of GOP (GOP_MULTIPLE) to have the encoder round up the segment lengths to match the next GOP boundary." }, "ManifestEncoding": { "shape": "MsSmoothManifestEncoding", @@ -9854,6 +9933,11 @@ "locationName": "encryptionType", "documentation": "Specify how you want your data keys managed. AWS uses data keys to encrypt your content. AWS also encrypts the data keys themselves, using a customer master key (CMK), and then stores the encrypted data keys alongside your encrypted content. Use this setting to specify which AWS service manages the CMK. For simplest set up, choose Amazon S3 (SERVER_SIDE_ENCRYPTION_S3). If you want your master key to be managed by AWS Key Management Service (KMS), choose AWS KMS (SERVER_SIDE_ENCRYPTION_KMS). By default, when you choose AWS KMS, KMS uses the AWS managed customer master key (CMK) associated with Amazon S3 to encrypt your data keys. You can optionally choose to specify a different, customer managed CMK. Do so by specifying the Amazon Resource Name (ARN) of the key for the setting KMS ARN (kmsKeyArn)." }, + "KmsEncryptionContext": { + "shape": "__stringPatternAZaZ0902", + "locationName": "kmsEncryptionContext", + "documentation": "Optionally, specify the encryption context that you want to use alongside your KMS key. AWS KMS uses this encryption context as additional authenticated data (AAD) to support authenticated encryption. This value must be a base64-encoded UTF-8 string holding JSON which represents a string-string map. To use this setting, you must also set Server-side encryption (S3ServerSideEncryptionType) to AWS KMS (SERVER_SIDE_ENCRYPTION_KMS). For more information about encryption context, see: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#encrypt_context." + }, "KmsKeyArn": { "shape": "__stringPatternArnAwsUsGovCnKmsAZ26EastWestCentralNorthSouthEastWest1912D12KeyAFAF098AFAF094AFAF094AFAF094AFAF0912", "locationName": "kmsKeyArn", diff --git a/botocore/data/polly/2016-06-10/service-2.json b/botocore/data/polly/2016-06-10/service-2.json index 65b8b20ddc..c91f9d9bde 100644 --- a/botocore/data/polly/2016-06-10/service-2.json +++ b/botocore/data/polly/2016-06-10/service-2.json @@ -23,7 +23,7 @@ {"shape":"LexiconNotFoundException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Deletes the specified pronunciation lexicon stored in an AWS Region. A lexicon which has been deleted is not available for speech synthesis, nor is it possible to retrieve it using either the GetLexicon or ListLexicon APIs.

For more information, see Managing Lexicons.

" + "documentation":"

Deletes the specified pronunciation lexicon stored in an Amazon Web Services Region. A lexicon which has been deleted is not available for speech synthesis, nor is it possible to retrieve it using either the GetLexicon or ListLexicon APIs.

For more information, see Managing Lexicons.

" }, "DescribeVoices":{ "name":"DescribeVoices", @@ -53,7 +53,7 @@ {"shape":"LexiconNotFoundException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Returns the content of the specified pronunciation lexicon stored in an AWS Region. For more information, see Managing Lexicons.

" + "documentation":"

Returns the content of the specified pronunciation lexicon stored in an Amazon Web Services Region. For more information, see Managing Lexicons.

" }, "GetSpeechSynthesisTask":{ "name":"GetSpeechSynthesisTask", @@ -84,7 +84,7 @@ {"shape":"InvalidNextTokenException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Returns a list of pronunciation lexicons stored in an AWS Region. For more information, see Managing Lexicons.

" + "documentation":"

Returns a list of pronunciation lexicons stored in an Amazon Web Services Region. For more information, see Managing Lexicons.

" }, "ListSpeechSynthesisTasks":{ "name":"ListSpeechSynthesisTasks", @@ -119,7 +119,7 @@ {"shape":"MaxLexiconsNumberExceededException"}, {"shape":"ServiceFailureException"} ], - "documentation":"

Stores a pronunciation lexicon in an AWS Region. If a lexicon with the same name already exists in the region, it is overwritten by the new lexicon. Lexicon operations have eventual consistency, therefore, it might take some time before the lexicon is available to the SynthesizeSpeech operation.

For more information, see Managing Lexicons.

" + "documentation":"

Stores a pronunciation lexicon in an Amazon Web Services Region. If a lexicon with the same name already exists in the region, it is overwritten by the new lexicon. Lexicon operations have eventual consistency; therefore, it might take some time before the lexicon is available to the SynthesizeSpeech operation.

For more information, see Managing Lexicons.

" }, "StartSpeechSynthesisTask":{ "name":"StartSpeechSynthesisTask", @@ -144,7 +144,7 @@ {"shape":"SsmlMarksNotSupportedForTextTypeException"}, {"shape":"LanguageNotSupportedException"} ], - "documentation":"

Allows the creation of an asynchronous synthesis task, by starting a new SpeechSynthesisTask. This operation requires all the standard information needed for speech synthesis, plus the name of an Amazon S3 bucket for the service to store the output of the synthesis task and two optional parameters (OutputS3KeyPrefix and SnsTopicArn). Once the synthesis task is created, this operation will return a SpeechSynthesisTask object, which will include an identifier of this task as well as the current status.

" + "documentation":"

Allows the creation of an asynchronous synthesis task, by starting a new SpeechSynthesisTask. This operation requires all the standard information needed for speech synthesis, plus the name of an Amazon S3 bucket for the service to store the output of the synthesis task and two optional parameters (OutputS3KeyPrefix and SnsTopicArn). Once the synthesis task is created, this operation will return a SpeechSynthesisTask object, which will include an identifier of this task as well as the current status. The SpeechSynthesisTask object is available for 72 hours after starting the asynchronous synthesis task.
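A hedged sketch of starting an asynchronous task with the new Aria voice (the bucket name is a placeholder; Aria requires the neural engine):

    import boto3

    polly = boto3.client('polly')

    task = polly.start_speech_synthesis_task(
        Engine='neural',
        LanguageCode='en-NZ',
        VoiceId='Aria',
        OutputFormat='mp3',
        OutputS3BucketName='amzn-s3-demo-bucket',  # placeholder
        Text='Kia ora! This is the new New Zealand English voice.',
    )
    print(task['SynthesisTask']['TaskId'], task['SynthesisTask']['TaskStatus'])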

" }, "SynthesizeSpeech":{ "name":"SynthesizeSpeech", @@ -414,7 +414,8 @@ "ro-RO", "ru-RU", "sv-SE", - "tr-TR" + "tr-TR", + "en-NZ" ] }, "LanguageCodeList":{ @@ -721,7 +722,7 @@ }, "LanguageCode":{ "shape":"LanguageCode", - "documentation":"

Optional language code for the Speech Synthesis request. This is only necessary if using a bilingual voice, such as Aditi, which can be used for either Indian English (en-IN) or Hindi (hi-IN).

If a bilingual voice is used and no language code is specified, Amazon Polly will use the default language of the bilingual voice. The default language for any voice is the one returned by the DescribeVoices operation for the LanguageCode parameter. For example, if no language code is specified, Aditi will use Indian English rather than Hindi.

" + "documentation":"

Optional language code for the Speech Synthesis request. This is only necessary if using a bilingual voice, such as Aditi, which can be used for either Indian English (en-IN) or Hindi (hi-IN).

If a bilingual voice is used and no language code is specified, Amazon Polly uses the default language of the bilingual voice. The default language for any voice is the one returned by the DescribeVoices operation for the LanguageCode parameter. For example, if no language code is specified, Aditi will use Indian English rather than Hindi.

" }, "LexiconNames":{ "shape":"LexiconNameList", @@ -835,7 +836,7 @@ }, "LanguageCode":{ "shape":"LanguageCode", - "documentation":"

Optional language code for a synthesis task. This is only necessary if using a bilingual voice, such as Aditi, which can be used for either Indian English (en-IN) or Hindi (hi-IN).

If a bilingual voice is used and no language code is specified, Amazon Polly will use the default language of the bilingual voice. The default language for any voice is the one returned by the DescribeVoices operation for the LanguageCode parameter. For example, if no language code is specified, Aditi will use Indian English rather than Hindi.

" + "documentation":"

Optional language code for a synthesis task. This is only necessary if using a bilingual voice, such as Aditi, which can be used for either Indian English (en-IN) or Hindi (hi-IN).

If a bilingual voice is used and no language code is specified, Amazon Polly uses the default language of the bilingual voice. The default language for any voice is the one returned by the DescribeVoices operation for the LanguageCode parameter. For example, if no language code is specified, Aditi will use Indian English rather than Hindi.

" } }, "documentation":"

SynthesisTask object that provides information about a speech synthesis task.

" @@ -867,7 +868,7 @@ }, "LanguageCode":{ "shape":"LanguageCode", - "documentation":"

Optional language code for the Synthesize Speech request. This is only necessary if using a bilingual voice, such as Aditi, which can be used for either Indian English (en-IN) or Hindi (hi-IN).

If a bilingual voice is used and no language code is specified, Amazon Polly will use the default language of the bilingual voice. The default language for any voice is the one returned by the DescribeVoices operation for the LanguageCode parameter. For example, if no language code is specified, Aditi will use Indian English rather than Hindi.

" + "documentation":"

Optional language code for the Synthesize Speech request. This is only necessary if using a bilingual voice, such as Aditi, which can be used for either Indian English (en-IN) or Hindi (hi-IN).

If a bilingual voice is used and no language code is specified, Amazon Polly uses the default language of the bilingual voice. The default language for any voice is the one returned by the DescribeVoices operation for the LanguageCode parameter. For example, if no language code is specified, Aditi will use Indian English rather than Hindi.

" }, "LexiconNames":{ "shape":"LexiconNameList", @@ -1069,7 +1070,8 @@ "Vicki", "Vitoria", "Zeina", - "Zhiyu" + "Zhiyu", + "Aria" ] }, "VoiceList":{ diff --git a/botocore/data/ssm/2014-11-06/service-2.json b/botocore/data/ssm/2014-11-06/service-2.json index df25323b26..c352c1ec37 100644 --- a/botocore/data/ssm/2014-11-06/service-2.json +++ b/botocore/data/ssm/2014-11-06/service-2.json @@ -11984,7 +11984,10 @@ "max":128, "min":0 }, - "PSParameterValue":{"type":"string"}, + "PSParameterValue":{ + "type":"string", + "sensitive":true + }, "PSParameterVersion":{"type":"long"}, "Parameter":{ "type":"structure", diff --git a/botocore/data/transcribe/2017-10-26/service-2.json b/botocore/data/transcribe/2017-10-26/service-2.json index 152342c812..832eddce5e 100644 --- a/botocore/data/transcribe/2017-10-26/service-2.json +++ b/botocore/data/transcribe/2017-10-26/service-2.json @@ -59,7 +59,7 @@ {"shape":"InternalFailureException"}, {"shape":"ConflictException"} ], - "documentation":"

Creates a new custom vocabulary that you can use to change how Amazon Transcribe Medical transcribes your audio file.

" + "documentation":"

Creates a new custom vocabulary that you can use to modify how Amazon Transcribe Medical transcribes your audio file.

" }, "CreateVocabulary":{ "name":"CreateVocabulary", @@ -414,6 +414,22 @@ ], "documentation":"

Returns a list of vocabularies that match the specified criteria. If you don't enter a value in any of the request parameters, returns the entire list of vocabularies.

" }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Lists all tags associated with a given transcription job, vocabulary, or resource.

" + }, "ListTranscriptionJobs":{ "name":"ListTranscriptionJobs", "http":{ @@ -507,6 +523,40 @@ ], "documentation":"

Starts an asynchronous job to transcribe speech to text.

" }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Tags an Amazon Transcribe resource with the given list of tags.
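A rough boto3 sketch of the new tagging operations (the ARN is a made-up example):

    import boto3

    transcribe = boto3.client('transcribe')
    job_arn = 'arn:aws:transcribe:us-east-1:111122223333:transcription-job/my-job'

    transcribe.tag_resource(
        ResourceArn=job_arn,
        Tags=[{'Key': 'project', 'Value': 'demo'}],
    )
    print(transcribe.list_tags_for_resource(ResourceArn=job_arn)['Tags'])
    transcribe.untag_resource(ResourceArn=job_arn, TagKeys=['project'])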

" + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"BadRequestException"}, + {"shape":"ConflictException"}, + {"shape":"NotFoundException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Removes the specified tags from the specified Amazon Transcribe resource.

" + }, "UpdateCallAnalyticsCategory":{ "name":"UpdateCallAnalyticsCategory", "http":{ @@ -637,7 +687,7 @@ }, "LanguageCode":{ "shape":"LanguageCode", - "documentation":"

If you know the language spoken between the customer and the agent, specify a language code for this field.

If you don't know the language, you can leave this field blank, and Amazon Transcribe will use machine learning to automatically identify the language. To improve the accuracy of language identification, you can provide an array containing the possible language codes for the language spoken in your audio.

The following list shows the supported languages and corresponding language codes for call analytics jobs:

" + "documentation":"

If you know the language spoken between the customer and the agent, specify a language code for this field.

If you don't know the language, you can leave this field blank, and Amazon Transcribe will use machine learning to automatically identify the language. To improve the accuracy of language identification, you can provide an array containing the possible language codes for the language spoken in your audio. Refer to Supported languages and language-specific features for additional information.

" }, "MediaSampleRateHertz":{ "shape":"MediaSampleRateHertz", @@ -663,7 +713,7 @@ }, "FailureReason":{ "shape":"FailureReason", - "documentation":"

If the AnalyticsJobStatus is FAILED, this field contains information about why the job failed.

The FailureReason field can contain one of the following values:

" + "documentation":"

If the AnalyticsJobStatus is FAILED, this field contains information about why the job failed.

The FailureReason field can contain one of the following values:

" }, "DataAccessRoleArn":{ "shape":"DataAccessRoleArn", @@ -712,7 +762,7 @@ "ContentRedaction":{"shape":"ContentRedaction"}, "LanguageOptions":{ "shape":"LanguageOptions", - "documentation":"

When you run a call analytics job, you can specify the language spoken in the audio, or you can have Amazon Transcribe identify the language for you.

To specify a language, specify an array with one language code. If you don't know the language, you can leave this field blank and Amazon Transcribe will use machine learning to identify the language for you. To improve the ability of Amazon Transcribe to correctly identify the language, you can provide an array of the languages that can be present in the audio.

The following list shows the supported languages and corresponding language codes for call analytics jobs:

" + "documentation":"

When you run a call analytics job, you can specify the language spoken in the audio, or you can have Amazon Transcribe identify the language for you.

To specify a language, specify an array with one language code. If you don't know the language, you can leave this field blank and Amazon Transcribe will use machine learning to identify the language for you. To improve the ability of Amazon Transcribe to correctly identify the language, you can provide an array of the languages that can be present in the audio. Refer to Supported languages and language-specific features for additional information.

" } }, "documentation":"

Provides optional settings for the CallAnalyticsJob operation.

" @@ -888,7 +938,7 @@ }, "BaseModelName":{ "shape":"BaseModelName", - "documentation":"

The Amazon Transcribe standard language model, or base model used to create your custom language model.

If you want to use your custom language model to transcribe audio with a sample rate of 16 kHz or greater, choose Wideband.

If you want to use your custom language model to transcribe audio with a sample rate that is less than 16 kHz, choose Narrowband.

" + "documentation":"

The Amazon Transcribe standard language model, or base model used to create your custom language model.

If you want to use your custom language model to transcribe audio with a sample rate of 16,000 Hz or greater, choose Wideband.

If you want to use your custom language model to transcribe audio with a sample rate that is less than 16,000 Hz, choose Narrowband.

" }, "ModelName":{ "shape":"ModelName", @@ -897,6 +947,10 @@ "InputDataConfig":{ "shape":"InputDataConfig", "documentation":"

Contains the data access role and the Amazon S3 prefixes to read the required input files to create a custom language model.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

Adds one or more tags, each in the form of a key:value pair, to a new language model at the time you create this new model.

" } } }, @@ -944,6 +998,10 @@ "VocabularyFileUri":{ "shape":"Uri", "documentation":"

The location in Amazon S3 of the text file you use to define your custom vocabulary. The URI must be in the same Amazon Web Services Region as the resource that you're calling. Enter information about your VocabularyFileUri in the following format:

https://s3.<aws-region>.amazonaws.com/<bucket-name>/<keyprefix>/<objectkey>

The following is an example URI for a vocabulary file that is stored in Amazon S3:

https://s3.us-east-1.amazonaws.com/AWSDOC-EXAMPLE-BUCKET/vocab.txt

For more information about Amazon S3 object names, see Object Keys in the Amazon S3 Developer Guide.

For more information about custom vocabularies, see Medical Custom Vocabularies.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

Adds one or more tags, each in the form of a key:value pair, to a new medical vocabulary at the time you create this new vocabulary.

" } } }, @@ -994,6 +1052,10 @@ "VocabularyFilterFileUri":{ "shape":"Uri", "documentation":"

The Amazon S3 location of a text file used as input to create the vocabulary filter. Only use characters from the character set defined for custom vocabularies. For a list of character sets, see Character Sets for Custom Vocabularies.

The specified file must be less than 50 KB of UTF-8 characters.

If you provide the location of a list of words in the VocabularyFilterFileUri parameter, you can't use the Words parameter.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

Adds one or more tags, each in the form of a key:value pair, to a new Amazon Transcribe vocabulary filter at the time you create this new vocabulary filter.

" } } }, @@ -1027,7 +1089,7 @@ }, "LanguageCode":{ "shape":"LanguageCode", - "documentation":"

The language code of the vocabulary entries. For a list of languages and their corresponding language codes, see what-is-transcribe.

" + "documentation":"

The language code of the vocabulary entries. For a list of languages and their corresponding language codes, see transcribe-whatis.

" }, "Phrases":{ "shape":"Phrases", @@ -1035,7 +1097,11 @@ }, "VocabularyFileUri":{ "shape":"Uri", - "documentation":"

The S3 location of the text file that contains the definition of the custom vocabulary. The URI must be in the same region as the API endpoint that you are calling. The general form is

For more information about S3 object names, see Object Keys in the Amazon S3 Developer Guide.

For more information about custom vocabularies, see Custom Vocabularies.

" + "documentation":"

The S3 location of the text file that contains the definition of the custom vocabulary. The URI must be in the same region as the API endpoint that you are calling. The general form is: https://s3.<aws-region>.amazonaws.com/<bucket-name>/<keyprefix>/<objectkey>

For more information about S3 object names, see Object Keys in the Amazon S3 Developer Guide.

For more information about custom vocabularies, see Custom vocabularies.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

Adds one or more tags, each in the form of a key:value pair, to a new Amazon Transcribe vocabulary at the time you create this new vocabulary.
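Tags can also be attached at creation time; a minimal sketch with an invented vocabulary name:

    import boto3

    transcribe = boto3.client('transcribe')

    transcribe.create_vocabulary(
        VocabularyName='demo-vocabulary',
        LanguageCode='en-US',
        Phrases=['botocore', 'MediaConvert'],
        Tags=[{'Key': 'team', 'Value': 'media'}],
    )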

" } } }, @@ -1261,7 +1327,7 @@ }, "VocabularyState":{ "shape":"VocabularyState", - "documentation":"

The processing state of the vocabulary. If the VocabularyState is READY then you can use it in the StartMedicalTranscriptionJob operation.

" + "documentation":"

The processing state of the vocabulary. If the VocabularyState is READY then you can use it in the StartMedicalTranscriptionJob operation.

" }, "LastModifiedTime":{ "shape":"DateTime", @@ -1549,7 +1615,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

The maximum number of categories to return in the response. If there are fewer results in the list, the response contains only the actual results.

" + "documentation":"

The maximum number of categories to return in each page of results. If there are fewer results than the value you specify, only the actual results are returned. If you do not specify a value, the default of 5 is used.

" } } }, @@ -1583,7 +1649,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

The maximum number of call analytics jobs to return in the response. If there are fewer results in the list, this response contains only the actual results.

" + "documentation":"

The maximum number of call analytics jobs to return in each page of results. If there are fewer results than the value you specify, only the actual results are returned. If you do not specify a value, the default of 5 is used.

" } } }, @@ -1621,7 +1687,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

The maximum number of language models to return in the response. If there are fewer results in the list, the response contains only the actual results.

" + "documentation":"

The maximum number of language models to return in each page of results. If there are fewer results than the value you specify, only the actual results are returned. If you do not specify a value, the default of 5 is used.

" } } }, @@ -1655,7 +1721,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

The maximum number of medical transcription jobs to return in the response. IF there are fewer results in the list, this response contains only the actual results.

" + "documentation":"

The maximum number of medical transcription jobs to return in each page of results. If there are fewer results than the value you specify, only the actual results are returned. If you do not specify a value, the default of 5 is used.

" } } }, @@ -1685,7 +1751,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

The maximum number of vocabularies to return in the response.

" + "documentation":"

The maximum number of vocabularies to return in each page of results. If there are fewer results than the value you specify, only the actual results are returned. If you do not specify a value, the default of 5 is used.

" }, "StateEquals":{ "shape":"VocabularyState", @@ -1714,12 +1780,35 @@ } } }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"TranscribeArn", + "documentation":"

Lists all tags associated with a given Amazon Resource Name (ARN).

" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "ResourceArn":{ + "shape":"TranscribeArn", + "documentation":"

Lists all tags associated with the given Amazon Resource Name (ARN).

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

Lists all tags associated with the given transcription job, vocabulary, or resource.

" + } + } + }, "ListTranscriptionJobsRequest":{ "type":"structure", "members":{ "Status":{ "shape":"TranscriptionJobStatus", - "documentation":"

When specified, returns only transcription jobs with the specified status. Jobs are ordered by creation date, with the newest jobs returned first. If you don’t specify a status, Amazon Transcribe returns all transcription jobs ordered by creation date.

" + "documentation":"

When specified, returns only transcription jobs with the specified status. Jobs are ordered by creation date, with the newest jobs returned first. If you don’t specify a status, Amazon Transcribe returns all transcription jobs ordered by creation date.

" }, "JobNameContains":{ "shape":"TranscriptionJobName", @@ -1731,7 +1820,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

The maximum number of jobs to return in the response. If there are fewer results in the list, this response contains only the actual results.

" + "documentation":"

The maximum number of jobs to return in each page of results. If there are fewer results than the value you specify, only the actual results are returned. If you do not specify a value, the default of 5 is used.

" } } }, @@ -1761,7 +1850,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

The maximum number of vocabularies to return in the response. If there are fewer results in the list, this response contains only the actual results.

" + "documentation":"

The maximum number of vocabularies to return in each page of results. If there are fewer results than the value you specify, only the actual results are returned. If you do not specify a value, the default of 5 is used.

" }, "StateEquals":{ "shape":"VocabularyState", @@ -1799,7 +1888,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

The maximum number of filters to return in the response. If there are fewer results in the list, this response contains only the actual results.

" + "documentation":"

The maximum number of filters to return in each page of results. If there are fewer results than the value you specify, only the actual results are returned. If you do not specify a value, the default of 5 is used.

" }, "NameContains":{ "shape":"VocabularyFilterName", @@ -1922,7 +2011,7 @@ }, "FailureReason":{ "shape":"FailureReason", - "documentation":"

If the TranscriptionJobStatus field is FAILED, this field contains information about why the job failed.

The FailureReason field contains one of the following values:

" + "documentation":"

If the TranscriptionJobStatus field is FAILED, this field contains information about why the job failed.

The FailureReason field contains one of the following values:
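Whatever the specific value, callers typically surface it after polling the job; a minimal sketch, assuming a hypothetical job name::

    import botocore.session

    session = botocore.session.get_session()
    transcribe = session.create_client("transcribe", region_name="us-east-1")

    result = transcribe.get_medical_transcription_job(
        MedicalTranscriptionJobName="my-dictation-job"  # placeholder name
    )
    job = result["MedicalTranscriptionJob"]
    if job["TranscriptionJobStatus"] == "FAILED":
        print("Job failed:", job["FailureReason"])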

" }, "Settings":{ "shape":"MedicalTranscriptionSetting", @@ -1934,11 +2023,15 @@ }, "Specialty":{ "shape":"Specialty", - "documentation":"

The medical specialty of any clinicians providing a dictation or having a conversation. PRIMARYCARE is the only available setting for this object. This specialty enables you to generate transcriptions for the following medical fields:

" + "documentation":"

The medical specialty of any clinicians providing a dictation or having a conversation. Refer to Transcribing a medical conversation for a list of supported specialties.

" }, "Type":{ "shape":"Type", "documentation":"

The type of speech in the transcription job. CONVERSATION is generally used for patient-physician dialogues. DICTATION is the setting for physicians speaking their notes after seeing a patient. For more information, see What is Amazon Transcribe Medical?.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

A key:value pair assigned to a given medical transcription job.

" } }, "documentation":"

The data structure that contains the information for a medical transcription job.

" @@ -1980,11 +2073,11 @@ }, "OutputLocationType":{ "shape":"OutputLocationType", - "documentation":"

Indicates the location of the transcription job's output.

The CUSTOMER_BUCKET is the S3 location provided in the OutputBucketName field when the

" + "documentation":"

Indicates the location of the transcription job's output. This field must be the path of an S3 bucket; if you don't already have an S3 bucket, one is created based on the path you add.

" }, "Specialty":{ "shape":"Specialty", - "documentation":"

The medical specialty of the transcription job. Primary care is the only valid value.

" + "documentation":"

The medical specialty of the transcription job. Refer to Transcribing a medical conversation for a list of supported specialties.

" }, "ContentIdentificationType":{ "shape":"MedicalContentIdentificationType", @@ -2152,7 +2245,7 @@ "members":{ "StartPercentage":{ "shape":"Percentage", - "documentation":"

A value that indicates the percentage of the beginning of the time range. To set a relative time range, you must specify a start percentage and an end percentage. For example, if you specify the following values:

This looks at the time range starting from 10% of the way into the call to 50% of the way through the call. For a call that lasts 100,000 milliseconds, this example range would apply from the 10,000 millisecond mark to the 50,000 millisecond mark.

" + "documentation":"

A value that indicates the percentage of the beginning of the time range. To set a relative time range, you must specify a start percentage and an end percentage. For example, if you specify the following values:

This looks at the time range starting from 10% of the way into the call to 50% of the way through the call. For a call that lasts 100,000 milliseconds, this example range would apply from the 10,000 millisecond mark to the 50,000 millisecond mark.
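The arithmetic is simple enough to sanity-check in a few lines of Python::

    def relative_range_ms(duration_ms, start_percentage, end_percentage):
        # Convert a relative time range (percentages) to absolute milliseconds.
        return (duration_ms * start_percentage // 100,
                duration_ms * end_percentage // 100)

    # The worked example above: 10% to 50% of a 100,000 millisecond call.
    print(relative_range_ms(100_000, 10, 50))  # (10000, 50000)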

" }, "EndPercentage":{ "shape":"Percentage", @@ -2291,16 +2384,16 @@ "members":{ "CallAnalyticsJobName":{ "shape":"CallAnalyticsJobName", - "documentation":"

The name of the call analytics job. You can't use the string \".\" or \"..\" by themselves as the job name. The name must also be unique within an AWS account. If you try to create a call analytics job with the same name as a previous call analytics job, you get a ConflictException error.

" + "documentation":"

The name of the call analytics job. You can't use the string \".\" or \"..\" by themselves as the job name. The name must also be unique within an Amazon Web Services account. If you try to create a call analytics job with the same name as a previous call analytics job, you get a ConflictException error.
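A short sketch of trapping that error; the job name, bucket, and role ARN are placeholders::

    import botocore.session
    from botocore.exceptions import ClientError

    session = botocore.session.get_session()
    transcribe = session.create_client("transcribe", region_name="us-east-1")

    try:
        transcribe.start_call_analytics_job(
            CallAnalyticsJobName="my-analytics-job",  # must be unique per account
            Media={"MediaFileUri": "s3://DOC-EXAMPLE-BUCKET/call.wav"},
            DataAccessRoleArn="arn:aws:iam::111122223333:role/TranscribeAccess",
        )
    except ClientError as error:
        if error.response["Error"]["Code"] == "ConflictException":
            print("A call analytics job with this name already exists.")
        else:
            raise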

" }, "Media":{"shape":"Media"}, "OutputLocation":{ "shape":"Uri", - "documentation":"

The Amazon S3 location where the output of the call analytics job is stored. You can provide the following location types to store the output of call analytics job:

You can specify an AWS Key Management Service key to encrypt the output of our analytics job using the OutputEncryptionKMSKeyId parameter. If you don't specify a KMS key, Amazon Transcribe uses the default Amazon S3 key for server-side encryption of the analytics job output that is placed in your S3 bucket.

" + "documentation":"

The Amazon S3 location where the output of the call analytics job is stored. You can provide the following location types to store the output of call analytics job:

You can specify an Amazon Web Services Key Management Service (KMS) key to encrypt the output of our analytics job using the OutputEncryptionKMSKeyId parameter. If you don't specify a KMS key, Amazon Transcribe uses the default Amazon S3 key for server-side encryption of the analytics job output that is placed in your S3 bucket.

" }, "OutputEncryptionKMSKeyId":{ "shape":"KMSKeyId", - "documentation":"

The Amazon Resource Name (ARN) of the AWS Key Management Service key used to encrypt the output of the call analytics job. The user calling the operation must have permission to use the specified KMS key.

You use either of the following to identify an AWS KMS key in the current account:

You can use either of the following to identify a KMS key in the current account or another account:

If you don't specify an encryption key, the output of the call analytics job is encrypted with the default Amazon S3 key (SSE-S3).

If you specify a KMS key to encrypt your output, you must also specify an output location in the OutputLocation parameter.

" + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Web Services Key Management Service key used to encrypt the output of the call analytics job. The user calling the operation must have permission to use the specified KMS key.

You use either of the following to identify an Amazon Web Services KMS key in the current account:

You can use either of the following to identify a KMS key in the current account or another account:

If you don't specify an encryption key, the output of the call analytics job is encrypted with the default Amazon S3 key (SSE-S3).

If you specify a KMS key to encrypt your output, you must also specify an output location in the OutputLocation parameter.

" }, "DataAccessRoleArn":{ "shape":"DataAccessRoleArn", @@ -2379,7 +2472,11 @@ }, "Type":{ "shape":"Type", - "documentation":"

The type of speech in the input audio. CONVERSATION refers to conversations between two or more speakers, e.g., a conversations between doctors and patients. DICTATION refers to single-speaker dictated speech, e.g., for clinical notes.

" + "documentation":"

The type of speech in the input audio. CONVERSATION refers to conversations between two or more speakers, e.g., a conversation between doctors and patients. DICTATION refers to single-speaker dictated speech, such as clinical notes.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

Add tags to an Amazon Transcribe medical transcription job.
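A minimal sketch of tagging at creation time; the job name and bucket are placeholders::

    import botocore.session

    session = botocore.session.get_session()
    transcribe = session.create_client("transcribe", region_name="us-east-1")

    transcribe.start_medical_transcription_job(
        MedicalTranscriptionJobName="my-dictation-job",
        LanguageCode="en-US",
        Media={"MediaFileUri": "s3://DOC-EXAMPLE-BUCKET/dictation.wav"},
        OutputBucketName="DOC-EXAMPLE-BUCKET",
        Specialty="PRIMARYCARE",
        Type="DICTATION",
        Tags=[{"Key": "Department", "Value": "Sales"}],
    )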

" } } }, @@ -2405,7 +2502,7 @@ }, "LanguageCode":{ "shape":"LanguageCode", - "documentation":"

The language code for the language used in the input media file.

To transcribe speech in Modern Standard Arabic (ar-SA), your audio or video file must be encoded at a sample rate of 16000 Hz or higher.

" + "documentation":"

The language code for the language used in the input media file.

To transcribe speech in Modern Standard Arabic (ar-SA), your audio or video file must be encoded at a sample rate of 16,000 Hz or higher.

" }, "MediaSampleRateHertz":{ "shape":"MediaSampleRateHertz", @@ -2429,7 +2526,7 @@ }, "OutputEncryptionKMSKeyId":{ "shape":"KMSKeyId", - "documentation":"

The Amazon Resource Name (ARN) of the Amazon Web Services Key Management Service (KMS) key used to encrypt the output of the transcription job. The user calling the StartTranscriptionJob operation must have permission to use the specified KMS key.

You can use either of the following to identify a KMS key in the current account:

You can use either of the following to identify a KMS key in the current account or another account:

If you don't specify an encryption key, the output of the transcription job is encrypted with the default Amazon S3 key (SSE-S3).

If you specify a KMS key to encrypt your output, you must also specify an output location in the OutputBucketName parameter.

" + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Web Services Key Management Service (KMS) key used to encrypt the output of the transcription job. The user calling the StartTranscriptionJob operation must have permission to use the specified KMS key.

You can use either of the following to identify a KMS key in the current account:

You can use either of the following to identify a KMS key in the current account or another account:

If you don't specify an encryption key, the output of the transcription job is encrypted with the default Amazon S3 key (SSE-S3).

If you specify a KMS key to encrypt your output, you must also specify an output location in the OutputBucketName parameter.
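Putting those rules together in a sketch; the job name, bucket, and key alias are placeholders, and any of the documented key identifiers would work in place of the alias::

    import botocore.session

    session = botocore.session.get_session()
    transcribe = session.create_client("transcribe", region_name="us-east-1")

    transcribe.start_transcription_job(
        TranscriptionJobName="kms-encrypted-job",
        LanguageCode="en-US",
        Media={"MediaFileUri": "s3://DOC-EXAMPLE-BUCKET/audio.wav"},
        OutputBucketName="DOC-EXAMPLE-BUCKET",  # required when a KMS key is given
        OutputEncryptionKMSKeyId="alias/ExampleAlias",
    )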

" }, "Settings":{ "shape":"Settings", @@ -2453,7 +2550,11 @@ }, "LanguageOptions":{ "shape":"LanguageOptions", - "documentation":"

An object containing a list of languages that might be present in your collection of audio files. Automatic language identification chooses a language that best matches the source audio from that list.

To transcribe speech in Modern Standard Arabic (ar-SA), your audio or video file must be encoded at a sample rate of 16000 Hz or higher.

" + "documentation":"

An object containing a list of languages that might be present in your collection of audio files. Automatic language identification chooses a language that best matches the source audio from that list.

To transcribe speech in Modern Standard Arabic (ar-SA), your audio or video file must be encoded at a sample rate of 16,000 Hz or higher.
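A sketch of pairing this list with automatic language identification; the job name, bucket, and candidate languages are illustrative::

    import botocore.session

    session = botocore.session.get_session()
    transcribe = session.create_client("transcribe", region_name="us-east-1")

    transcribe.start_transcription_job(
        TranscriptionJobName="identify-language-demo",
        Media={"MediaFileUri": "s3://DOC-EXAMPLE-BUCKET/meeting.mp3"},
        IdentifyLanguage=True,
        LanguageOptions=["en-US", "fr-FR", "ar-SA"],
    )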

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

Add tags to an Amazon Transcribe transcription job.

" } } }, @@ -2472,11 +2573,79 @@ "member":{"shape":"NonEmptyString"}, "min":1 }, + "Tag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{ + "shape":"TagKey", + "documentation":"

The first part of a key:value pair that forms a tag associated with a given resource. For example, in the tag 'Department':'Sales', the key is 'Department'.

" + }, + "Value":{ + "shape":"TagValue", + "documentation":"

The second part of a key:value pair that forms a tag associated with a given resource. For example, in the tag 'Department':'Sales', the value is 'Sales'.

" + } + }, + "documentation":"

A key:value pair that adds metadata to a resource used by Amazon Transcribe. For example, a tag with the key:value pair 'Department':'Sales' might be added to a resource to indicate its use by your organization's sales department.

" + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":1 + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":200, + "min":1 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "Tags" + ], + "members":{ + "ResourceArn":{ + "shape":"TranscribeArn", + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Transcribe resource you want to tag.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags you are assigning to a given Amazon Transcribe resource.
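For instance, reusing the 'Department':'Sales' pair from the Tag documentation; the vocabulary ARN is a placeholder::

    import botocore.session

    session = botocore.session.get_session()
    transcribe = session.create_client("transcribe", region_name="us-east-1")

    transcribe.tag_resource(
        ResourceArn="arn:aws:transcribe:us-east-1:111122223333:vocabulary/my-vocab",
        Tags=[{"Key": "Department", "Value": "Sales"}],
    )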

" + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0 + }, "TimestampMilliseconds":{ "type":"long", "max":14400000, "min":0 }, + "TranscribeArn":{ + "type":"string", + "max":1011, + "min":1, + "pattern":"arn:aws(-[^:]+)?:transcribe:[a-zA-Z0-9-]*:[0-9]{12}:[a-zA-Z-]*/[0-9a-zA-Z._-]+" + }, "Transcript":{ "type":"structure", "members":{ @@ -2562,7 +2731,7 @@ }, "StartTime":{ "shape":"DateTime", - "documentation":"

A timestamp that shows with the job was started processing.

" + "documentation":"

A timestamp that shows when the job started processing.

" }, "CreationTime":{ "shape":"DateTime", @@ -2574,7 +2743,7 @@ }, "FailureReason":{ "shape":"FailureReason", - "documentation":"

If the TranscriptionJobStatus field is FAILED, this field contains information about why the job failed.

The FailureReason field can contain one of the following values:

" + "documentation":"

If the TranscriptionJobStatus field is FAILED, this field contains information about why the job failed.

The FailureReason field can contain one of the following values:

" }, "Settings":{ "shape":"Settings", @@ -2603,6 +2772,10 @@ "IdentifiedLanguageScore":{ "shape":"IdentifiedLanguageScore", "documentation":"

A value between zero and one that Amazon Transcribe assigned to the language that it identified in the source audio. Larger values indicate that Amazon Transcribe has higher confidence in the language it identified.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

A key:value pair assigned to a given transcription job.

" } }, "documentation":"

Describes an asynchronous transcription job that was created with the StartTranscriptionJob operation.

" @@ -2684,6 +2857,28 @@ "DICTATION" ] }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "TagKeys" + ], + "members":{ + "ResourceArn":{ + "shape":"TranscribeArn", + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Transcribe resource you want to remove tags from.

" + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

A list of tag keys you want to remove from a specified Amazon Transcribe resource.
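The inverse of the tagging sketch above; again, the ARN is a placeholder::

    import botocore.session

    session = botocore.session.get_session()
    transcribe = session.create_client("transcribe", region_name="us-east-1")

    transcribe.untag_resource(
        ResourceArn="arn:aws:transcribe:us-east-1:111122223333:vocabulary/my-vocab",
        TagKeys=["Department"],
    )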

" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, "UpdateCallAnalyticsCategoryRequest":{ "type":"structure", "required":[ @@ -2693,7 +2888,7 @@ "members":{ "CategoryName":{ "shape":"CategoryName", - "documentation":"

The name of the analytics category to update. The name is case sensitive. If you try to update a call analytics category with the same name as a previous category you will receive a ConflictException error.

" + "documentation":"

The name of the analytics category to update. The name is case sensitive. If you try to update a call analytics category with the same name as a previous category, you will receive a ConflictException error.

" }, "Rules":{ "shape":"RuleList", @@ -2727,7 +2922,7 @@ }, "VocabularyFileUri":{ "shape":"Uri", - "documentation":"

The location in Amazon S3 of the text file that contains the you use for your custom vocabulary. The URI must be in the same Amazon Web Services Region as the resource that you are calling. The following is the format for a URI:

https://s3.<aws-region>.amazonaws.com/<bucket-name>/<keyprefix>/<objectkey>

For example:

https://s3.us-east-1.amazonaws.com/AWSDOC-EXAMPLE-BUCKET/vocab.txt

For more information about Amazon S3 object names, see Object Keys in the Amazon S3 Developer Guide.

For more information about custom vocabularies in Amazon Transcribe Medical, see Medical Custom Vocabularies.

" + "documentation":"

The location in Amazon S3 of the text file that contains your custom vocabulary. The URI must be in the same Amazon Web Services Region as the resource that you are calling. The following is the format for a URI:

https://s3.<aws-region>.amazonaws.com/<bucket-name>/<keyprefix>/<objectkey>

For example:

https://s3.us-east-1.amazonaws.com/AWSDOC-EXAMPLE-BUCKET/vocab.txt

For more information about Amazon S3 object names, see Object Keys in the Amazon S3 Developer Guide.

For more information about custom vocabularies in Amazon Transcribe Medical, see Medical Custom Vocabularies.
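A sketch of supplying such a URI when updating a medical vocabulary; the vocabulary name is a placeholder and must refer to an existing vocabulary::

    import botocore.session

    session = botocore.session.get_session()
    transcribe = session.create_client("transcribe", region_name="us-east-1")

    transcribe.update_medical_vocabulary(
        VocabularyName="my-medical-vocab",
        LanguageCode="en-US",
        VocabularyFileUri=(
            "https://s3.us-east-1.amazonaws.com/AWSDOC-EXAMPLE-BUCKET/vocab.txt"
        ),
    )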

" } } }, @@ -2800,7 +2995,7 @@ }, "LanguageCode":{ "shape":"LanguageCode", - "documentation":"

The language code of the vocabulary entries. For a list of languages and their corresponding language codes, see what-is-transcribe.

" + "documentation":"

The language code of the vocabulary entries. For a list of languages and their corresponding language codes, see transcribe-whatis.

" }, "Phrases":{ "shape":"Phrases", From 1224e1360ea5b42e980dd363b1a4e6cb2655805f Mon Sep 17 00:00:00 2001 From: aws-sdk-python-automation Date: Tue, 24 Aug 2021 18:11:57 +0000 Subject: [PATCH 2/2] Bumping version to 1.21.28 --- .changes/1.21.28.json | 27 +++++++++++++++++++ .../api-change-iotdata-21712.json | 5 ---- .../api-change-mediaconvert-30507.json | 5 ---- .../next-release/api-change-polly-20334.json | 5 ---- .../next-release/api-change-ssm-87345.json | 5 ---- .../api-change-transcribe-42056.json | 5 ---- CHANGELOG.rst | 10 +++++++ botocore/__init__.py | 2 +- docs/source/conf.py | 2 +- 9 files changed, 39 insertions(+), 27 deletions(-) create mode 100644 .changes/1.21.28.json delete mode 100644 .changes/next-release/api-change-iotdata-21712.json delete mode 100644 .changes/next-release/api-change-mediaconvert-30507.json delete mode 100644 .changes/next-release/api-change-polly-20334.json delete mode 100644 .changes/next-release/api-change-ssm-87345.json delete mode 100644 .changes/next-release/api-change-transcribe-42056.json diff --git a/.changes/1.21.28.json b/.changes/1.21.28.json new file mode 100644 index 0000000000..b2d2211627 --- /dev/null +++ b/.changes/1.21.28.json @@ -0,0 +1,27 @@ +[ + { + "category": "``mediaconvert``", + "description": "AWS Elemental MediaConvert SDK has added MBAFF encoding support for AVC video and the ability to pass encryption context from the job settings to S3.", + "type": "api-change" + }, + { + "category": "``polly``", + "description": "Amazon Polly adds new New Zealand English voice - Aria. Aria is available as Neural voice only.", + "type": "api-change" + }, + { + "category": "``transcribe``", + "description": "This release adds support for feature tagging with Amazon Transcribe batch jobs.", + "type": "api-change" + }, + { + "category": "``ssm``", + "description": "Updated Parameter Store property for logging improvements.", + "type": "api-change" + }, + { + "category": "``iot-data``", + "description": "Updated Publish with support for new Retain flag and added two new API operations: GetRetainedMessage, ListRetainedMessages.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/next-release/api-change-iotdata-21712.json b/.changes/next-release/api-change-iotdata-21712.json deleted file mode 100644 index a47beaa1b1..0000000000 --- a/.changes/next-release/api-change-iotdata-21712.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``iot-data``", - "description": "Updated Publish with support for new Retain flag and added two new API operations: GetRetainedMessage, ListRetainedMessages." -} diff --git a/.changes/next-release/api-change-mediaconvert-30507.json b/.changes/next-release/api-change-mediaconvert-30507.json deleted file mode 100644 index 42b4103a1d..0000000000 --- a/.changes/next-release/api-change-mediaconvert-30507.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``mediaconvert``", - "description": "AWS Elemental MediaConvert SDK has added MBAFF encoding support for AVC video and the ability to pass encryption context from the job settings to S3." -} diff --git a/.changes/next-release/api-change-polly-20334.json b/.changes/next-release/api-change-polly-20334.json deleted file mode 100644 index 015ca4bd3e..0000000000 --- a/.changes/next-release/api-change-polly-20334.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``polly``", - "description": "Amazon Polly adds new New Zealand English voice - Aria. 
Aria is available as Neural voice only." -} diff --git a/.changes/next-release/api-change-ssm-87345.json b/.changes/next-release/api-change-ssm-87345.json deleted file mode 100644 index 6292de149a..0000000000 --- a/.changes/next-release/api-change-ssm-87345.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``ssm``", - "description": "Updated Parameter Store property for logging improvements." -} diff --git a/.changes/next-release/api-change-transcribe-42056.json b/.changes/next-release/api-change-transcribe-42056.json deleted file mode 100644 index 7de6bc0d49..0000000000 --- a/.changes/next-release/api-change-transcribe-42056.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``transcribe``", - "description": "This release adds support for feature tagging with Amazon Transcribe batch jobs." -} diff --git a/CHANGELOG.rst b/CHANGELOG.rst index df56cd009a..cf47fd2532 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,16 @@ CHANGELOG ========= +1.21.28 +======= + +* api-change:``mediaconvert``: AWS Elemental MediaConvert SDK has added MBAFF encoding support for AVC video and the ability to pass encryption context from the job settings to S3. +* api-change:``polly``: Amazon Polly adds new New Zealand English voice - Aria. Aria is available as Neural voice only. +* api-change:``transcribe``: This release adds support for feature tagging with Amazon Transcribe batch jobs. +* api-change:``ssm``: Updated Parameter Store property for logging improvements. +* api-change:``iot-data``: Updated Publish with support for new Retain flag and added two new API operations: GetRetainedMessage, ListRetainedMessages. + + 1.21.27 ======= diff --git a/botocore/__init__.py b/botocore/__init__.py index be3d24b6da..6288691848 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import re import logging -__version__ = '1.21.27' +__version__ = '1.21.28' class NullHandler(logging.Handler): diff --git a/docs/source/conf.py b/docs/source/conf.py index 88e2e160e4..1903a2cc3f 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -54,7 +54,7 @@ # The short X.Y version. version = '1.21.' # The full version, including alpha/beta/rc tags. -release = '1.21.27' +release = '1.21.28' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages.