diff --git a/botocore/__init__.py b/botocore/__init__.py index b1af245614..9e7a3bd45c 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import re import logging -__version__ = '0.51.0' +__version__ = '0.52.0' class NullHandler(logging.Handler): diff --git a/botocore/data/aws/elastictranscoder/2012-09-25.json b/botocore/data/aws/elastictranscoder/2012-09-25.json index 174be09446..bff8aedd60 100644 --- a/botocore/data/aws/elastictranscoder/2012-09-25.json +++ b/botocore/data/aws/elastictranscoder/2012-09-25.json @@ -23,8 +23,8 @@ "type": "string", "pattern": "^\\d{13}-\\w{6}$", "documentation": "\n
The identifier of the job that you want to cancel.
\nTo get a list of the jobs (including their jobId
) that have a status of\n Submitted
, use the ListJobsByStatus API action.
The CancelJobRequest
structure.
The Id
of the pipeline that you want Elastic Transcoder to use for\n transcoding. The pipeline determines several settings, including the Amazon S3 bucket\n from which Elastic Transcoder gets the files to transcode and the bucket into which\n Elastic Transcoder puts the transcoded files.
The Id
of the pipeline that you want Elastic Transcoder to use for\n transcoding. The pipeline determines several settings, including the Amazon S3 bucket\n from which Elastic Transcoder gets the files to transcode and the bucket into which\n Elastic Transcoder puts the transcoded files.
The container type for the input file. If you want Elastic Transcoder to automatically detect the\n container type of the input file, specify auto
. If you want to specify the\n container type for the input file, enter one of the following values:
\n 3gp
, aac
, asf
, avi
, \n divx
, flv
, m4a
, mkv
, \n mov
, mp3
, mp4
, mpeg
, \n mpeg-ps
, mpeg-ts
, mxf
, ogg
, \n vob
, wav
, webm
\n
A section of the request body that provides information about the file that is being\n transcoded.
\n " + "documentation": "\nA section of the request body that provides information about the file that is being\n transcoded.
\n ", + "required": true }, "Output": { "shape_name": "CreateJobOutput", @@ -282,7 +284,7 @@ "shape_name": "Time", "type": "string", "pattern": "(^\\d{1,5}(\\.\\d{0,3})?$)|(^([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", - "documentation": "\nThe duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.
\nIf you specify a value longer than the duration of the input file , Elastic Transcoder transcodes \n the file and returns a warning message.
\n " + "documentation": "\nThe duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.
\nIf you specify a value longer than the duration of the input file, Elastic Transcoder transcodes \n the file and returns a warning message.
\n " } }, "documentation": "\nSettings that determine when a clip begins and how long it lasts.
\n " @@ -290,7 +292,85 @@ }, "documentation": "\nSettings for one clip in a composition. All jobs in a playlist must have the same clip settings.
\n " }, - "documentation": "\nYou can create an output file that contains an excerpt from the input file. This excerpt, called a clip, can come from the beginning, middle, or end of the file. The Composition object contains settings for the clips that make up an output file. For the current release, you can only specify settings for a single clip per output file. The Composition object cannot be null.
\n " + "documentation": "\nYou can create an output file that contains an excerpt from the input file. This \n excerpt, called a clip, can come from the beginning, middle, or end of the file. \n The Composition object contains settings for the clips that make up an output file. \n For the current release, you can only specify settings for a single clip per output \n file. The Composition object cannot be null.
\n " + }, + "Captions": { + "shape_name": "Captions", + "type": "structure", + "members": { + "MergePolicy": { + "shape_name": "CaptionMergePolicy", + "type": "string", + "pattern": "(^MergeOverride$)|(^MergeRetain$)|(^Override$)", + "documentation": "\nA policy that determines how Elastic Transcoder handles the existence of multiple captions.
\nMergeOverride: Elastic Transcoder transcodes both embedded\n and sidecar captions into outputs. If captions for a language are embedded in the input\n file and also appear in a sidecar file, Elastic Transcoder uses the sidecar captions and ignores the\n embedded captions for that language.
MergeRetain: Elastic Transcoder transcodes both embedded\n and sidecar captions into outputs. If captions for a language are embedded in the input\n file and also appear in a sidecar file, Elastic Transcoder uses the embedded captions and ignores the\n sidecar captions for that language. If
Override: Elastic Transcoder transcodes only the\n sidecar captions that you specify in CaptionSources
.
MergePolicy
cannot be null.
The name of the sidecar caption file that you want Elastic Transcoder to include in the\n output file.
\n " + }, + "Language": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\nA string that specifies the language of the caption. Specify this as one of:
\n2-character ISO 639-1 code
3-character ISO 639-2 code
For more information on ISO language codes and language names, see the List of ISO \n 639-1 codes.
\n " + }, + "TimeOffset": { + "shape_name": "TimeOffset", + "type": "string", + "pattern": "(^[+-]?\\d{1,5}(\\.\\d{0,3})?$)|(^[+-]?([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", + "documentation": "\nFor clip generation or captions that do not start at the same time\n as the associated video file, the TimeOffset
tells Elastic Transcoder how much of the video to encode\n before including captions.
Specify the TimeOffset in the form [+-]SS.sss or [+-]HH:mm:SS.ss.
\n " + }, + "Label": { + "shape_name": "Name", + "type": "string", + "min_length": 1, + "max_length": 40, + "documentation": "\nThe label of the caption shown in the player when choosing\n a language. We recommend that you put the caption language name here, \n in the language of the captions.
\n " + } + }, + "documentation": "\nA source file for the input sidecar captions used during the transcoding\n process.
\n " + }, + "max_length": 20, + "documentation": "\nSource files for the input sidecar captions used during the transcoding\n process. To omit all sidecar captions, leave CaptionSources
blank.
The format you specify determines whether Elastic Transcoder generates an\n embedded or sidecar caption for this output.
\nValid Embedded Caption Formats:
\nFor MP3: None
For MP4: mov-text
For MPEG-TS: None
For ogg: None
For webm: None
Valid Sidecar Caption Formats: Elastic Transcoder\n supports dfxp (first div element only), scc, srt, and webvtt. There are\n no container restrictions on sidecar captions. If you want ttml or smpte-tt\n compatible captions, specify dfxp as your output format.
\nThe prefix for caption filenames, in the form description-{language}
, where:
{language}
is a literal value that Elastic Transcoder replaces with the two- or three-letter\n code for the language of the caption in the output file names.If you don't include {language}
in the file name pattern, Elastic Transcoder automatically\n appends \"{language}
\" to the value that you specify for the description. In addition,\n Elastic Transcoder automatically appends the count to the end of the segment files.
For example, suppose you're transcoding into srt format. When you enter\n \"Sydney-{language}-sunrise\", and the language of the captions is English\n (en), the name of the first caption file will be Sydney-en-sunrise00000.srt.
\n " + } + }, + "documentation": "\nThe file format of the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.
\n " + }, + "max_length": 4, + "documentation": "\nThe array of file formats for the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.
\n " + } + }, + "documentation": "\nYou can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another.\n All captions must be in UTF-8. Elastic Transcoder supports two types of captions:
\nEmbedded: Embedded captions\n are included in the same file as the audio and video. Elastic Transcoder supports\n only one embedded caption per language, to a maximum of 300 embedded captions per file.
\nValid input values include: CEA-608 (EIA-608
, first non-empty channel only),\n \tCEA-708 (EIA-708
, first non-empty channel only), and mov-text
Valid outputs include: mov-text
Elastic Transcoder supports a maximum of one embedded format per output.
\nSidecar: Sidecar captions are kept in a\n separate metadata file from the audio and video data. Sidecar captions require a player\n that is capable of understanding the relationship between the video file and the sidecar\n file. Elastic Transcoder supports only one sidecar caption per language, to a maximum of 20 sidecar\n captions per file.
\nValid input values include: dfxp
(first div element only),\n ebu-tt
, scc
, smpt
, srt
,\n ttml
(first div element only), and webvtt
Valid outputs include: dfxp
(first div element only), scc
,\n srt
, and webvtt
.
If you want ttml or smpte-tt compatible captions, specify dfxp as your output format.
\nElastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures\n as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does\n not preserve text formatting (for example, italics) during the transcoding process.
\nTo remove captions or leave the captions empty, set Captions
to null. To pass through\n existing captions unchanged, set the MergePolicy
to MergeRetain
,\n and pass in a null CaptionSources
array.
For more information on embedded files, see the Subtitles Wikipedia page.
\nFor more information on sidecar files, see the Extensible Metadata Platform and Sidecar file\n Wikipedia pages.
\n " } }, "documentation": "\nThe CreateJobOutput
structure.
The duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.
\nIf you specify a value longer than the duration of the input file , Elastic Transcoder transcodes \n the file and returns a warning message.
\n " + "documentation": "\nThe duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.
\nIf you specify a value longer than the duration of the input file, Elastic Transcoder transcodes \n the file and returns a warning message.
\n " } }, "documentation": "\nSettings that determine when a clip begins and how long it lasts.
\n " @@ -452,7 +532,85 @@ }, "documentation": "\nSettings for one clip in a composition. All jobs in a playlist must have the same clip settings.
\n " }, - "documentation": "\nYou can create an output file that contains an excerpt from the input file. This excerpt, called a clip, can come from the beginning, middle, or end of the file. The Composition object contains settings for the clips that make up an output file. For the current release, you can only specify settings for a single clip per output file. The Composition object cannot be null.
\n " + "documentation": "\nYou can create an output file that contains an excerpt from the input file. This \n excerpt, called a clip, can come from the beginning, middle, or end of the file. \n The Composition object contains settings for the clips that make up an output file. \n For the current release, you can only specify settings for a single clip per output \n file. The Composition object cannot be null.
\n " + }, + "Captions": { + "shape_name": "Captions", + "type": "structure", + "members": { + "MergePolicy": { + "shape_name": "CaptionMergePolicy", + "type": "string", + "pattern": "(^MergeOverride$)|(^MergeRetain$)|(^Override$)", + "documentation": "\nA policy that determines how Elastic Transcoder handles the existence of multiple captions.
\nMergeOverride: Elastic Transcoder transcodes both embedded\n and sidecar captions into outputs. If captions for a language are embedded in the input\n file and also appear in a sidecar file, Elastic Transcoder uses the sidecar captions and ignores the\n embedded captions for that language.
MergeRetain: Elastic Transcoder transcodes both embedded\n and sidecar captions into outputs. If captions for a language are embedded in the input\n file and also appear in a sidecar file, Elastic Transcoder uses the embedded captions and ignores the\n sidecar captions for that language. If
Override: Elastic Transcoder transcodes only the\n sidecar captions that you specify in CaptionSources
.
MergePolicy
cannot be null.
The name of the sidecar caption file that you want Elastic Transcoder to include in the\n output file.
\n " + }, + "Language": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\nA string that specifies the language of the caption. Specify this as one of:
\n2-character ISO 639-1 code
3-character ISO 639-2 code
For more information on ISO language codes and language names, see the List of ISO \n 639-1 codes.
\n " + }, + "TimeOffset": { + "shape_name": "TimeOffset", + "type": "string", + "pattern": "(^[+-]?\\d{1,5}(\\.\\d{0,3})?$)|(^[+-]?([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", + "documentation": "\nFor clip generation or captions that do not start at the same time\n as the associated video file, the TimeOffset
tells Elastic Transcoder how much of the video to encode\n before including captions.
Specify the TimeOffset in the form [+-]SS.sss or [+-]HH:mm:SS.ss.
\n " + }, + "Label": { + "shape_name": "Name", + "type": "string", + "min_length": 1, + "max_length": 40, + "documentation": "\nThe label of the caption shown in the player when choosing\n a language. We recommend that you put the caption language name here, \n in the language of the captions.
\n " + } + }, + "documentation": "\nA source file for the input sidecar captions used during the transcoding\n process.
\n " + }, + "max_length": 20, + "documentation": "\nSource files for the input sidecar captions used during the transcoding\n process. To omit all sidecar captions, leave CaptionSources
blank.
The format you specify determines whether Elastic Transcoder generates an\n embedded or sidecar caption for this output.
\nValid Embedded Caption Formats:
\nFor MP3: None
For MP4: mov-text
For MPEG-TS: None
For ogg: None
For webm: None
Valid Sidecar Caption Formats: Elastic Transcoder\n supports dfxp (first div element only), scc, srt, and webvtt. There are\n no container restrictions on sidecar captions. If you want ttml or smpte-tt\n compatible captions, specify dfxp as your output format.
\nThe prefix for caption filenames, in the form description-{language}
, where:
{language}
is a literal value that Elastic Transcoder replaces with the two- or three-letter\n code for the language of the caption in the output file names.If you don't include {language}
in the file name pattern, Elastic Transcoder automatically\n appends \"{language}
\" to the value that you specify for the description. In addition,\n Elastic Transcoder automatically appends the count to the end of the segment files.
For example, suppose you're transcoding into srt format. When you enter\n \"Sydney-{language}-sunrise\", and the language of the captions is English\n (en), the name of the first caption file will be Sydney-en-sunrise00000.srt.
\n " + } + }, + "documentation": "\nThe file format of the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.
\n " + }, + "max_length": 4, + "documentation": "\nThe array of file formats for the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.
\n " + } + }, + "documentation": "\nYou can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another.\n All captions must be in UTF-8. Elastic Transcoder supports two types of captions:
\nEmbedded: Embedded captions\n are included in the same file as the audio and video. Elastic Transcoder supports\n only one embedded caption per language, to a maximum of 300 embedded captions per file.
\nValid input values include: CEA-608 (EIA-608
, first non-empty channel only),\n \tCEA-708 (EIA-708
, first non-empty channel only), and mov-text
Valid outputs include: mov-text
Elastic Transcoder supports a maximum of one embedded format per output.
\nSidecar: Sidecar captions are kept in a\n separate metadata file from the audio and video data. Sidecar captions require a player\n that is capable of understanding the relationship between the video file and the sidecar\n file. Elastic Transcoder supports only one sidecar caption per language, to a maximum of 20 sidecar\n captions per file.
\nValid input values include: dfxp
(first div element only),\n ebu-tt
, scc
, smpt
, srt
,\n ttml
(first div element only), and webvtt
Valid outputs include: dfxp
(first div element only), scc
,\n srt
, and webvtt
.
If you want ttml or smpte-tt compatible captions, specify dfxp as your output format.
\nElastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures\n as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does\n not preserve text formatting (for example, italics) during the transcoding process.
\nTo remove captions or leave the captions empty, set Captions
to null. To pass through\n existing captions unchanged, set the MergePolicy
to MergeRetain
,\n and pass in a null CaptionSources
array.
For more information on embedded files, see the Subtitles Wikipedia page.
\nFor more information on sidecar files, see the Extensible Metadata Platform and Sidecar file\n Wikipedia pages.
\n " } }, "documentation": "\nThe CreateJobOutput
structure.
The duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.
\nIf you specify a value longer than the duration of the input file , Elastic Transcoder transcodes \n the file and returns a warning message.
\n " + "documentation": "\nThe duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.
\nIf you specify a value longer than the duration of the input file, Elastic Transcoder transcodes \n the file and returns a warning message.
\n " } }, "documentation": "\nSettings that determine when a clip begins and how long it lasts.
\n " @@ -765,7 +923,85 @@ }, "documentation": "\nSettings for one clip in a composition. All jobs in a playlist must have the same clip settings.
\n " }, - "documentation": "\nYou can create an output file that contains an excerpt from the input file. This excerpt, called a clip, can come from the beginning, middle, or end of the file. The Composition object contains settings for the clips that make up an output file. For the current release, you can only specify settings for a single clip per output file. The Composition object cannot be null.
\n " + "documentation": "\nYou can create an output file that contains an excerpt from the input file. This \n excerpt, called a clip, can come from the beginning, middle, or end of the file. \n The Composition object contains settings for the clips that make up an output \n file. For the current release, you can only specify settings for a single clip \n per output file. The Composition object cannot be null.
\n " + }, + "Captions": { + "shape_name": "Captions", + "type": "structure", + "members": { + "MergePolicy": { + "shape_name": "CaptionMergePolicy", + "type": "string", + "pattern": "(^MergeOverride$)|(^MergeRetain$)|(^Override$)", + "documentation": "\nA policy that determines how Elastic Transcoder handles the existence of multiple captions.
\nMergeOverride: Elastic Transcoder transcodes both embedded\n and sidecar captions into outputs. If captions for a language are embedded in the input\n file and also appear in a sidecar file, Elastic Transcoder uses the sidecar captions and ignores the\n embedded captions for that language.
MergeRetain: Elastic Transcoder transcodes both embedded\n and sidecar captions into outputs. If captions for a language are embedded in the input\n file and also appear in a sidecar file, Elastic Transcoder uses the embedded captions and ignores the\n sidecar captions for that language. If
Override: Elastic Transcoder transcodes only the\n sidecar captions that you specify in CaptionSources
.
MergePolicy
cannot be null.
The name of the sidecar caption file that you want Elastic Transcoder to include in the\n output file.
\n " + }, + "Language": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\nA string that specifies the language of the caption. Specify this as one of:
\n2-character ISO 639-1 code
3-character ISO 639-2 code
For more information on ISO language codes and language names, see the List of ISO \n 639-1 codes.
\n " + }, + "TimeOffset": { + "shape_name": "TimeOffset", + "type": "string", + "pattern": "(^[+-]?\\d{1,5}(\\.\\d{0,3})?$)|(^[+-]?([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", + "documentation": "\nFor clip generation or captions that do not start at the same time\n as the associated video file, the TimeOffset
tells Elastic Transcoder how much of the video to encode\n before including captions.
Specify the TimeOffset in the form [+-]SS.sss or [+-]HH:mm:SS.ss.
\n " + }, + "Label": { + "shape_name": "Name", + "type": "string", + "min_length": 1, + "max_length": 40, + "documentation": "\nThe label of the caption shown in the player when choosing\n a language. We recommend that you put the caption language name here, \n in the language of the captions.
\n " + } + }, + "documentation": "\nA source file for the input sidecar captions used during the transcoding\n process.
\n " + }, + "max_length": 20, + "documentation": "\nSource files for the input sidecar captions used during the transcoding\n process. To omit all sidecar captions, leave CaptionSources
blank.
The format you specify determines whether Elastic Transcoder generates an\n embedded or sidecar caption for this output.
\nValid Embedded Caption Formats:
\nFor MP3: None
For MP4: mov-text
For MPEG-TS: None
For ogg: None
For webm: None
Valid Sidecar Caption Formats: Elastic Transcoder\n supports dfxp (first div element only), scc, srt, and webvtt. There are\n no container restrictions on sidecar captions. If you want ttml or smpte-tt\n compatible captions, specify dfxp as your output format.
\nThe prefix for caption filenames, in the form description-{language}
, where:
{language}
is a literal value that Elastic Transcoder replaces with the two- or three-letter\n code for the language of the caption in the output file names.If you don't include {language}
in the file name pattern, Elastic Transcoder automatically\n appends \"{language}
\" to the value that you specify for the description. In addition,\n Elastic Transcoder automatically appends the count to the end of the segment files.
For example, suppose you're transcoding into srt format. When you enter\n \"Sydney-{language}-sunrise\", and the language of the captions is English\n (en), the name of the first caption file will be Sydney-en-sunrise00000.srt.
\n " + } + }, + "documentation": "\nThe file format of the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.
\n " + }, + "max_length": 4, + "documentation": "\nThe array of file formats for the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.
\n " + } + }, + "documentation": "\nYou can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another.\n All captions must be in UTF-8. Elastic Transcoder supports two types of captions:
\nEmbedded: Embedded captions\n are included in the same file as the audio and video. Elastic Transcoder supports\n only one embedded caption per language, to a maximum of 300 embedded captions per file.
\nValid input values include: CEA-608 (EIA-608
, first non-empty channel only),\n CEA-708 (EIA-708
, first non-empty channel only), and mov-text
Valid outputs include: mov-text
Elastic Transcoder supports a maximum of one embedded format per output.
\nSidecar: Sidecar captions are kept in a\n separate metadata file from the audio and video data. Sidecar captions require a player\n that is capable of understanding the relationship between the video file and the sidecar\n file. Elastic Transcoder supports only one sidecar caption per language, to a maximum of 20 sidecar\n captions per file.
\nValid input values include: dfxp
(first div element only),\n ebu-tt
, scc
, smpt
, srt
,\n ttml
(first div element only), and webvtt
Valid outputs include: dfxp
(first div element only), scc
,\n srt
, and webvtt
.
If you want ttml or smpte-tt compatible captions, specify dfxp as your output format.
\nElastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures\n as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does\n not preserve text formatting (for example, italics) during the transcoding process.
\nTo remove captions or leave the captions empty, set Captions
to null. To pass through\n existing captions unchanged, set the MergePolicy
to MergeRetain
,\n and pass in a null CaptionSources
array.
For more information on embedded files, see the Subtitles Wikipedia page.
\nFor more information on sidecar files, see the Extensible Metadata Platform and Sidecar file\n Wikipedia pages.
\n " } }, "documentation": "\nIf you specified one output for a job, information about that output. If you specified\n multiple outputs for a job, the Output object lists information about the first output.\n This duplicates the information that is listed for the first output in the Outputs\n object.
\nThe duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.
\nIf you specify a value longer than the duration of the input file , Elastic Transcoder transcodes \n the file and returns a warning message.
\n " + "documentation": "\nThe duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.
\nIf you specify a value longer than the duration of the input file, Elastic Transcoder transcodes \n the file and returns a warning message.
\n " } }, "documentation": "\nSettings that determine when a clip begins and how long it lasts.
\n " @@ -960,7 +1196,85 @@ }, "documentation": "\nSettings for one clip in a composition. All jobs in a playlist must have the same clip settings.
\n " }, - "documentation": "\nYou can create an output file that contains an excerpt from the input file. This excerpt, called a clip, can come from the beginning, middle, or end of the file. The Composition object contains settings for the clips that make up an output file. For the current release, you can only specify settings for a single clip per output file. The Composition object cannot be null.
\n " + "documentation": "\nYou can create an output file that contains an excerpt from the input file. This \n excerpt, called a clip, can come from the beginning, middle, or end of the file. \n The Composition object contains settings for the clips that make up an output \n file. For the current release, you can only specify settings for a single clip \n per output file. The Composition object cannot be null.
\n " + }, + "Captions": { + "shape_name": "Captions", + "type": "structure", + "members": { + "MergePolicy": { + "shape_name": "CaptionMergePolicy", + "type": "string", + "pattern": "(^MergeOverride$)|(^MergeRetain$)|(^Override$)", + "documentation": "\nA policy that determines how Elastic Transcoder handles the existence of multiple captions.
\nMergeOverride: Elastic Transcoder transcodes both embedded\n and sidecar captions into outputs. If captions for a language are embedded in the input\n file and also appear in a sidecar file, Elastic Transcoder uses the sidecar captions and ignores the\n embedded captions for that language.
MergeRetain: Elastic Transcoder transcodes both embedded\n and sidecar captions into outputs. If captions for a language are embedded in the input\n file and also appear in a sidecar file, Elastic Transcoder uses the embedded captions and ignores the\n sidecar captions for that language. If
Override: Elastic Transcoder transcodes only the\n sidecar captions that you specify in CaptionSources
.
MergePolicy
cannot be null.
The name of the sidecar caption file that you want Elastic Transcoder to include in the\n output file.
\n " + }, + "Language": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\nA string that specifies the language of the caption. Specify this as one of:
\n2-character ISO 639-1 code
3-character ISO 639-2 code
For more information on ISO language codes and language names, see the List of ISO \n 639-1 codes.
\n " + }, + "TimeOffset": { + "shape_name": "TimeOffset", + "type": "string", + "pattern": "(^[+-]?\\d{1,5}(\\.\\d{0,3})?$)|(^[+-]?([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", + "documentation": "\nFor clip generation or captions that do not start at the same time\n as the associated video file, the TimeOffset
tells Elastic Transcoder how much of the video to encode\n before including captions.
Specify the TimeOffset in the form [+-]SS.sss or [+-]HH:mm:SS.ss.
\n " + }, + "Label": { + "shape_name": "Name", + "type": "string", + "min_length": 1, + "max_length": 40, + "documentation": "\nThe label of the caption shown in the player when choosing\n a language. We recommend that you put the caption language name here, \n in the language of the captions.
\n " + } + }, + "documentation": "\nA source file for the input sidecar captions used during the transcoding\n process.
\n " + }, + "max_length": 20, + "documentation": "\nSource files for the input sidecar captions used during the transcoding\n process. To omit all sidecar captions, leave CaptionSources
blank.
The format you specify determines whether Elastic Transcoder generates an\n embedded or sidecar caption for this output.
\nValid Embedded Caption Formats:
\nFor MP3: None
For MP4: mov-text
For MPEG-TS: None
For ogg: None
For webm: None
Valid Sidecar Caption Formats: Elastic Transcoder\n supports dfxp (first div element only), scc, srt, and webvtt. There are\n no container restrictions on sidecar captions. If you want ttml or smpte-tt\n compatible captions, specify dfxp as your output format.
\nThe prefix for caption filenames, in the form description-{language}
, where:
{language}
is a literal value that Elastic Transcoder replaces with the two- or three-letter\n code for the language of the caption in the output file names.If you don't include {language}
in the file name pattern, Elastic Transcoder automatically\n appends \"{language}
\" to the value that you specify for the description. In addition,\n Elastic Transcoder automatically appends the count to the end of the segment files.
For example, suppose you're transcoding into srt format. When you enter\n \"Sydney-{language}-sunrise\", and the language of the captions is English\n (en), the name of the first caption file will be Sydney-en-sunrise00000.srt.
\n " + } + }, + "documentation": "\nThe file format of the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.
\n " + }, + "max_length": 4, + "documentation": "\nThe array of file formats for the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.
\n " + } + }, + "documentation": "\nYou can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another.\n All captions must be in UTF-8. Elastic Transcoder supports two types of captions:
\nEmbedded: Embedded captions\n are included in the same file as the audio and video. Elastic Transcoder supports\n only one embedded caption per language, to a maximum of 300 embedded captions per file.
\nValid input values include: CEA-608 (EIA-608
, first non-empty channel only),\n CEA-708 (EIA-708
, first non-empty channel only), and mov-text
Valid outputs include: mov-text
Elastic Transcoder supports a maximum of one embedded format per output.
\nSidecar: Sidecar captions are kept in a\n separate metadata file from the audio and video data. Sidecar captions require a player\n that is capable of understanding the relationship between the video file and the sidecar\n file. Elastic Transcoder supports only one sidecar caption per language, to a maximum of 20 sidecar\n captions per file.
\nValid input values include: dfxp
(first div element only),\n ebu-tt
, scc
, smpt
, srt
,\n ttml
(first div element only), and webvtt
Valid outputs include: dfxp
(first div element only), scc
,\n srt
, and webvtt
.
If you want ttml or smpte-tt compatible captions, specify dfxp as your output format.
\nElastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures\n as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does\n not preserve text formatting (for example, italics) during the transcoding process.
\nTo remove captions or leave the captions empty, set Captions
to null. To pass through\n existing captions unchanged, set the MergePolicy
to MergeRetain
,\n and pass in a null CaptionSources
array.
For more information on embedded files, see the Subtitles Wikipedia page.
\nFor more information on sidecar files, see the Extensible Metadata Platform and Sidecar file\n Wikipedia pages.
\n " } }, "documentation": "\nOutput
object lists information about the first output. This duplicates\n the information that is listed for the first output in the Outputs
\n object.
The name of the pipeline. We recommend that the name be unique within the AWS account,\n but uniqueness is not enforced.
\nConstraints: Maximum 40 characters.
\n " + "documentation": "\nThe name of the pipeline. We recommend that the name be unique within the AWS account,\n but uniqueness is not enforced.
\nConstraints: Maximum 40 characters.
\n ", + "required": true }, "InputBucket": { "shape_name": "BucketName", "type": "string", "pattern": "^(\\w|\\.|-){1,255}$", - "documentation": "\nThe Amazon S3 bucket in which you saved the media files that you want to transcode.
\n " + "documentation": "\nThe Amazon S3 bucket in which you saved the media files that you want to transcode.
\n ", + "required": true }, "OutputBucket": { "shape_name": "BucketName", @@ -1111,7 +1427,8 @@ "shape_name": "Role", "type": "string", "pattern": "^arn:aws:iam::\\w{12}:role/.+$", - "documentation": "\nThe IAM Amazon Resource Name (ARN) for the role that you want Elastic Transcoder to use to create the\n pipeline.
\n " + "documentation": "\nThe IAM Amazon Resource Name (ARN) for the role that you want Elastic Transcoder to use to create the\n pipeline.
\n ", + "required": true }, "Notifications": { "shape_name": "Notifications", @@ -1518,7 +1835,8 @@ "type": "string", "min_length": 1, "max_length": 40, - "documentation": "\nThe name of the preset. We recommend that the name be unique within the AWS account, but\n uniqueness is not enforced.
\n " + "documentation": "\nThe name of the preset. We recommend that the name be unique within the AWS account, but\n uniqueness is not enforced.
\n ", + "required": true }, "Description": { "shape_name": "Description", @@ -1531,7 +1849,8 @@ "shape_name": "PresetContainer", "type": "string", "pattern": "(^mp4$)|(^ts$)|(^webm$)|(^mp3$)|(^ogg$)", - "documentation": "\nThe container type for the output file. Valid values include mp3
, \n mp4
, ogg
, ts
, and webm
.
The container type for the output file. Valid values include mp3
, \n mp4
, ogg
, ts
, and webm
.
The maximum width of the watermark in one of the following formats:
MaxWidth
.Target
to specify whether you want Elastic Transcoder to include the black\n bars that are added by Elastic Transcoder, if any, in the calculation.MaxWidth
.The maximum height of the watermark in one of the following formats:
MaxHeight
.Target
to specify whether you want Elastic Transcoder to include the black\n bars that are added by Elastic Transcoder, if any, in the calculation.MaxHeight
.\n "
},
"SizingPolicy": {
@@ -1676,7 +1995,7 @@
"HorizontalOffset": {
"shape_name": "PixelsOrPercent",
"type": "string",
- "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{2,4}?px$)",
+ "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{1,4}?px$)",
"documentation": "\n The amount by which you want the horizontal position of the watermark to be offset from\n the position specified by HorizontalAlign:
HorizontalAlign
and 5px for\n HorizontalOffset
, the left side of the watermark appears 5 pixels from\n the left border of the output video.\n HorizontalOffset
is only valid when the value of\n HorizontalAlign
is Left
or Right
. If you\n specify an offset that causes the watermark to extend beyond the left or right border\n and Elastic Transcoder has not added black bars, the watermark is cropped. If Elastic\n Transcoder has added black bars, the watermark extends into the black bars. If the\n watermark extends beyond the black bars, it is cropped.
Use the value of Target
to specify whether you want to include the black\n bars that are added by Elastic Transcoder, if any, in the offset calculation.
VerticalOffset
\n The amount by which you want the vertical position of the watermark to be offset from the\n position specified by VerticalAlign:
MaxHeight
.Top
for VerticalAlign
and\n 5px
for VerticalOffset
, the top of the watermark appears 5\n pixels from the top border of the output video.\n VerticalOffset
is only valid when the value of VerticalAlign is Top or\n Bottom.
If you specify an offset that causes the watermark to extend beyond the top or bottom\n border and Elastic Transcoder has not added black bars, the watermark is cropped. If\n Elastic Transcoder has added black bars, the watermark extends into the black bars. If\n the watermark extends beyond the black bars, it is cropped.
\n\nUse the value of Target
to specify whether you want Elastic Transcoder to\n include the black bars that are added by Elastic Transcoder, if any, in the offset\n calculation.
If you specified AAC
for Audio:Codec
, choose the AAC profile for the output file.\n Elastic Transcoder supports the following profiles:
auto
: If you specify auto
, Elastic Transcoder will select\n the profile based on the bit rate selected for the output file.AAC-LC
: The most common AAC profile. Use for bitrates larger than\n 64 kbps.HE-AAC
: Not supported on some older players and devices.\n Use for bitrates between 40 and 80 kbps.HE-AACv2
: Not supported on some players and devices.\n Use for bitrates less than 48 kbps.If you created any presets before AAC profiles were added, Elastic Transcoder automatically updated\n your presets to use AAC-LC. You can change the value as required.
You can only choose an audio profile when you specify AAC for the value of Audio:Codec.
\nSpecify the AAC profile for the output file. Elastic Transcoder supports the following profiles:
\nauto
: If you specify auto
, Elastic Transcoder will select\n the profile based on the bit rate selected for the output file.AAC-LC
: The most common AAC profile. Use for bitrates larger than\n 64 kbps.HE-AAC
: Not supported on some older players and devices.\n Use for bitrates between 40 and 80 kbps.HE-AACv2
: Not supported on some players and devices.\n Use for bitrates less than 48 kbps.If you created any presets before AAC profiles were added, Elastic Transcoder automatically updated\n your presets to use AAC-LC. You can change the value as required.
If you specified AAC
for Audio:Codec
, this is the AAC
\n compression profile to use. Valid values include:
auto
, AAC-LC
, HE-AAC
, HE-AACv2
If you specify auto
, Elastic Transcoder chooses a profile based on the bit rate of the output file.
If you specified AAC
for Audio:Codec
, choose the AAC profile for the output file.\n Elastic Transcoder supports the following profiles:
auto
: If you specify auto
, Elastic Transcoder will select\n the profile based on the bit rate selected for the output file.AAC-LC
: The most common AAC profile. Use for bitrates larger than\n 64 kbps.HE-AAC
: Not supported on some older players and devices.\n Use for bitrates between 40 and 80 kbps.HE-AACv2
: Not supported on some players and devices.\n Use for bitrates less than 48 kbps.If you created any presets before AAC profiles were added, Elastic Transcoder automatically updated\n your presets to use AAC-LC. You can change the value as required.
You can only choose an audio profile when you specify AAC for the value of Audio:Codec.
\nSpecify the AAC profile for the output file. Elastic Transcoder supports the following profiles:
\nauto
: If you specify auto
, Elastic Transcoder will select\n the profile based on the bit rate selected for the output file.AAC-LC
: The most common AAC profile. Use for bitrates larger than\n 64 kbps.HE-AAC
: Not supported on some older players and devices.\n Use for bitrates between 40 and 80 kbps.HE-AACv2
: Not supported on some players and devices.\n Use for bitrates less than 48 kbps.If you created any presets before AAC profiles were added, Elastic Transcoder automatically updated\n your presets to use AAC-LC. You can change the value as required.
If you specified AAC
for Audio:Codec
, this is the AAC
\n compression profile to use. Valid values include:
auto
, AAC-LC
, HE-AAC
, HE-AACv2
If you specify auto
, Elastic Transcoder chooses a profile based on the bit rate of the output file.
The maximum width of the watermark in one of the following formats:
MaxWidth
.Target
to specify whether you want Elastic Transcoder to include the black\n bars that are added by Elastic Transcoder, if any, in the calculation.MaxWidth
.The maximum height of the watermark in one of the following formats:
MaxHeight
.Target
to specify whether you want Elastic Transcoder to include the black\n bars that are added by Elastic Transcoder, if any, in the calculation.MaxHeight
.\n "
},
"SizingPolicy": {
@@ -2039,7 +2358,7 @@
"HorizontalOffset": {
"shape_name": "PixelsOrPercent",
"type": "string",
- "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{2,4}?px$)",
+ "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{1,4}?px$)",
"documentation": "\n The amount by which you want the horizontal position of the watermark to be offset from\n the position specified by HorizontalAlign:
HorizontalAlign
and 5px for\n HorizontalOffset
, the left side of the watermark appears 5 pixels from\n the left border of the output video.\n HorizontalOffset
is only valid when the value of\n HorizontalAlign
is Left
or Right
. If you\n specify an offset that causes the watermark to extend beyond the left or right border\n and Elastic Transcoder has not added black bars, the watermark is cropped. If Elastic\n Transcoder has added black bars, the watermark extends into the black bars. If the\n watermark extends beyond the black bars, it is cropped.
Use the value of Target
to specify whether you want to include the black\n bars that are added by Elastic Transcoder, if any, in the offset calculation.
VerticalOffset
\n The amount by which you want the vertical position of the watermark to be offset from the\n position specified by VerticalAlign:
MaxHeight
.Top
for VerticalAlign
and\n 5px
for VerticalOffset
, the top of the watermark appears 5\n pixels from the top border of the output video.\n VerticalOffset
is only valid when the value of VerticalAlign is Top or\n Bottom.
If you specify an offset that causes the watermark to extend beyond the top or bottom\n border and Elastic Transcoder has not added black bars, the watermark is cropped. If\n Elastic Transcoder has added black bars, the watermark extends into the black bars. If\n the watermark extends beyond the black bars, it is cropped.
\n\nUse the value of Target
to specify whether you want Elastic Transcoder to\n include the black bars that are added by Elastic Transcoder, if any, in the offset\n calculation.
The identifier of the pipeline that you want to delete.
\n ", - "location": "uri", - "required": true + "required": true, + "location": "uri" } }, "documentation": "\nThe DeletePipelineRequest
structure.
The identifier of the preset for which you want to get detailed information.
\n ", - "location": "uri", - "required": true + "required": true, + "location": "uri" } }, "documentation": "\nThe DeletePresetRequest
structure.
The ID of the pipeline for which you want to get job information.
\n ", - "location": "uri", - "required": true + "required": true, + "location": "uri" }, "Ascending": { "shape_name": "Ascending", @@ -2596,7 +2915,7 @@ "shape_name": "Time", "type": "string", "pattern": "(^\\d{1,5}(\\.\\d{0,3})?$)|(^([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", - "documentation": "\nThe duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.
\nIf you specify a value longer than the duration of the input file , Elastic Transcoder transcodes \n the file and returns a warning message.
\n " + "documentation": "\nThe duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.
\nIf you specify a value longer than the duration of the input file, Elastic Transcoder transcodes \n the file and returns a warning message.
\n " } }, "documentation": "\nSettings that determine when a clip begins and how long it lasts.
\n " @@ -2604,7 +2923,85 @@ }, "documentation": "\nSettings for one clip in a composition. All jobs in a playlist must have the same clip settings.
\n " }, - "documentation": "\nYou can create an output file that contains an excerpt from the input file. This excerpt, called a clip, can come from the beginning, middle, or end of the file. The Composition object contains settings for the clips that make up an output file. For the current release, you can only specify settings for a single clip per output file. The Composition object cannot be null.
\n " + "documentation": "\nYou can create an output file that contains an excerpt from the input file. This \n excerpt, called a clip, can come from the beginning, middle, or end of the file. \n The Composition object contains settings for the clips that make up an output \n file. For the current release, you can only specify settings for a single clip \n per output file. The Composition object cannot be null.
\n " + }, + "Captions": { + "shape_name": "Captions", + "type": "structure", + "members": { + "MergePolicy": { + "shape_name": "CaptionMergePolicy", + "type": "string", + "pattern": "(^MergeOverride$)|(^MergeRetain$)|(^Override$)", + "documentation": "\nA policy that determines how Elastic Transcoder handles the existence of multiple captions.
\nMergeOverride: Elastic Transcoder transcodes both embedded\n and sidecar captions into outputs. If captions for a language are embedded in the input\n file and also appear in a sidecar file, Elastic Transcoder uses the sidecar captions and ignores the\n embedded captions for that language.
MergeRetain: Elastic Transcoder transcodes both embedded\n and sidecar captions into outputs. If captions for a language are embedded in the input\n file and also appear in a sidecar file, Elastic Transcoder uses the embedded captions and ignores the\n sidecar captions for that language. If
Override: Elastic Transcoder transcodes only the\n sidecar captions that you specify in CaptionSources
.
MergePolicy
cannot be null.
The name of the sidecar caption file that you want Elastic Transcoder to include in the\n output file.
\n " + }, + "Language": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\nA string that specifies the language of the caption. Specify this as one of:
\n2-character ISO 639-1 code
3-character ISO 639-2 code
For more information on ISO language codes and language names, see the List of ISO \n 639-1 codes.
\n " + }, + "TimeOffset": { + "shape_name": "TimeOffset", + "type": "string", + "pattern": "(^[+-]?\\d{1,5}(\\.\\d{0,3})?$)|(^[+-]?([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", + "documentation": "\nFor clip generation or captions that do not start at the same time\n as the associated video file, the TimeOffset
tells Elastic Transcoder how much of the video to encode\n before including captions.
Specify the TimeOffset in the form [+-]SS.sss or [+-]HH:mm:SS.ss.
\n " + }, + "Label": { + "shape_name": "Name", + "type": "string", + "min_length": 1, + "max_length": 40, + "documentation": "\nThe label of the caption shown in the player when choosing\n a language. We recommend that you put the caption language name here, \n in the language of the captions.
\n " + } + }, + "documentation": "\nA source file for the input sidecar captions used during the transcoding\n process.
\n " + }, + "max_length": 20, + "documentation": "\nSource files for the input sidecar captions used during the transcoding\n process. To omit all sidecar captions, leave CaptionSources
blank.
The format you specify determines whether Elastic Transcoder generates an\n embedded or sidecar caption for this output.
\nValid Embedded Caption Formats:
\nFor MP3: None
For MP4: mov-text
For MPEG-TS: None
For ogg: None
For webm: None
Valid Sidecar Caption Formats: Elastic Transcoder\n supports dfxp (first div element only), scc, srt, and webvtt. There are\n no container restrictions on sidecar captions. If you want ttml or smpte-tt\n compatible captions, specify dfxp as your output format.
\nThe prefix for caption filenames, in the form description-{language}
, where:
{language}
is a literal value that Elastic Transcoder replaces with the two- or three-letter\n code for the language of the caption in the output file names.If you don't include {language}
in the file name pattern, Elastic Transcoder automatically\n appends \"{language}
\" to the value that you specify for the description. In addition,\n Elastic Transcoder automatically appends the count to the end of the segment files.
For example, suppose you're transcoding into srt format. When you enter\n \"Sydney-{language}-sunrise\", and the language of the captions is English\n (en), the name of the first caption file will be Sydney-en-sunrise00000.srt.
\n " + } + }, + "documentation": "\nThe file format of the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.
\n " + }, + "max_length": 4, + "documentation": "\nThe array of file formats for the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.
\n " + } + }, + "documentation": "\nYou can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another.\n All captions must be in UTF-8. Elastic Transcoder supports two types of captions:
\nEmbedded: Embedded captions\n are included in the same file as the audio and video. Elastic Transcoder supports\n only one embedded caption per language, to a maximum of 300 embedded captions per file.
\nValid input values include: CEA-608 (EIA-608
, first non-empty channel only),\n CEA-708 (EIA-708
, first non-empty channel only), and mov-text
Valid outputs include: mov-text
Elastic Transcoder supports a maximum of one embedded format per output.
\nSidecar: Sidecar captions are kept in a\n separate metadata file from the audio and video data. Sidecar captions require a player\n that is capable of understanding the relationship between the video file and the sidecar\n file. Elastic Transcoder supports only one sidecar caption per language, to a maximum of 20 sidecar\n captions per file.
\nValid input values include: dfxp
(first div element only),\n ebu-tt
, scc
, smpt
, srt
,\n ttml
(first div element only), and webvtt
Valid outputs include: dfxp
(first div element only), scc
,\n srt
, and webvtt
.
If you want ttml or smpte-tt compatible captions, specify dfxp as your output format.
\nElastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures\n as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does\n not preserve text formatting (for example, italics) during the transcoding process.
\nTo remove captions or leave the captions empty, set Captions
to null. To pass through\n existing captions unchanged, set the MergePolicy
to MergeRetain
,\n and pass in a null CaptionSources
array.
For more information on embedded files, see the Subtitles Wikipedia page.
\nFor more information on sidecar files, see the Extensible Metadata Platform and Sidecar file\n Wikipedia pages.
\n " } }, "documentation": "\nIf you specified one output for a job, information about that output. If you specified\n multiple outputs for a job, the Output object lists information about the first output.\n This duplicates the information that is listed for the first output in the Outputs\n object.
\nThe duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.
\nIf you specify a value longer than the duration of the input file , Elastic Transcoder transcodes \n the file and returns a warning message.
\n " + "documentation": "\nThe duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.
\nIf you specify a value longer than the duration of the input file, Elastic Transcoder transcodes \n the file and returns a warning message.
\n " } }, "documentation": "\nSettings that determine when a clip begins and how long it lasts.
\n " @@ -2799,7 +3196,85 @@ }, "documentation": "\nSettings for one clip in a composition. All jobs in a playlist must have the same clip settings.
\n " }, - "documentation": "\nYou can create an output file that contains an excerpt from the input file. This excerpt, called a clip, can come from the beginning, middle, or end of the file. The Composition object contains settings for the clips that make up an output file. For the current release, you can only specify settings for a single clip per output file. The Composition object cannot be null.
\n " + "documentation": "\nYou can create an output file that contains an excerpt from the input file. This \n excerpt, called a clip, can come from the beginning, middle, or end of the file. \n The Composition object contains settings for the clips that make up an output \n file. For the current release, you can only specify settings for a single clip \n per output file. The Composition object cannot be null.
\n " + }, + "Captions": { + "shape_name": "Captions", + "type": "structure", + "members": { + "MergePolicy": { + "shape_name": "CaptionMergePolicy", + "type": "string", + "pattern": "(^MergeOverride$)|(^MergeRetain$)|(^Override$)", + "documentation": "\nA policy that determines how Elastic Transcoder handles the existence of multiple captions.
\nMergeOverride: Elastic Transcoder transcodes both embedded\n and sidecar captions into outputs. If captions for a language are embedded in the input\n file and also appear in a sidecar file, Elastic Transcoder uses the sidecar captions and ignores the\n embedded captions for that language.
MergeRetain: Elastic Transcoder transcodes both embedded\n and sidecar captions into outputs. If captions for a language are embedded in the input\n file and also appear in a sidecar file, Elastic Transcoder uses the embedded captions and ignores the\n sidecar captions for that language. If
Override: Elastic Transcoder transcodes only the\n sidecar captions that you specify in CaptionSources
.
MergePolicy
cannot be null.
The name of the sidecar caption file that you want Elastic Transcoder to include in the\n output file.
\n " + }, + "Language": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\nA string that specifies the language of the caption. Specify this as one of:
\n2-character ISO 639-1 code
3-character ISO 639-2 code
For more information on ISO language codes and language names, see the List of ISO \n 639-1 codes.
\n " + }, + "TimeOffset": { + "shape_name": "TimeOffset", + "type": "string", + "pattern": "(^[+-]?\\d{1,5}(\\.\\d{0,3})?$)|(^[+-]?([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", + "documentation": "\nFor clip generation or captions that do not start at the same time\n as the associated video file, the TimeOffset
tells Elastic Transcoder how much of the video to encode\n before including captions.
Specify the TimeOffset in the form [+-]SS.sss or [+-]HH:mm:SS.ss.
\n " + }, + "Label": { + "shape_name": "Name", + "type": "string", + "min_length": 1, + "max_length": 40, + "documentation": "\nThe label of the caption shown in the player when choosing\n a language. We recommend that you put the caption language name here, \n in the language of the captions.
\n " + } + }, + "documentation": "\nA source file for the input sidecar captions used during the transcoding\n process.
\n " + }, + "max_length": 20, + "documentation": "\nSource files for the input sidecar captions used during the transcoding\n process. To omit all sidecar captions, leave CaptionSources
blank.
The format you specify determines whether Elastic Transcoder generates an\n embedded or sidecar caption for this output.
\nValid Embedded Caption Formats:
\nFor MP3: None
For MP4: mov-text
For MPEG-TS: None
For ogg: None
For webm: None
Valid Sidecar Caption Formats: Elastic Transcoder\n supports dfxp (first div element only), scc, srt, and webvtt. There are\n no container restrictions on sidecar captions. If you want ttml or smpte-tt\n compatible captions, specify dfxp as your output format.
\nThe prefix for caption filenames, in the form description-{language}
, where:
{language}
is a literal value that Elastic Transcoder replaces with the two- or three-letter\n code for the language of the caption in the output file names.If you don't include {language}
in the file name pattern, Elastic Transcoder automatically\n appends \"{language}
\" to the value that you specify for the description. In addition,\n Elastic Transcoder automatically appends the count to the end of the segment files.
For example, suppose you're transcoding into srt format. When you enter\n \"Sydney-{language}-sunrise\", and the language of the captions is English\n (en), the name of the first caption file will be Sydney-en-sunrise00000.srt.
\n " + } + }, + "documentation": "\nThe file format of the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.
\n " + }, + "max_length": 4, + "documentation": "\nThe array of file formats for the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.
\n " + } + }, + "documentation": "\nYou can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another.\n All captions must be in UTF-8. Elastic Transcoder supports two types of captions:
\nEmbedded: Embedded captions\n are included in the same file as the audio and video. Elastic Transcoder supports\n only one embedded caption per language, to a maximum of 300 embedded captions per file.
\nValid input values include: CEA-608 (EIA-608
, first non-empty channel only),\n CEA-708 (EIA-708
, first non-empty channel only), and mov-text
Valid outputs include: mov-text
Elastic Transcoder supports a maximum of one embedded format per output.
\nSidecar: Sidecar captions are kept in a\n separate metadata file from the audio and video data. Sidecar captions require a player\n that is capable of understanding the relationship between the video file and the sidecar\n file. Elastic Transcoder supports only one sidecar caption per language, to a maximum of 20 sidecar\n captions per file.
\nValid input values include: dfxp
(first div element only),\n ebu-tt
, scc
, smpt
, srt
,\n ttml
(first div element only), and webvtt
Valid outputs include: dfxp
(first div element only), scc
,\n srt
, and webvtt
.
If you want ttml or smpte-tt compatible captions, specify dfxp as your output format.
\nElastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures\n as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does\n not preserve text formatting (for example, italics) during the transcoding process.
\nTo remove captions or leave the captions empty, set Captions
to null. To pass through\n existing captions unchanged, set the MergePolicy
to MergeRetain
,\n and pass in a null CaptionSources
array.
For more information on embedded files, see the Subtitles Wikipedia page.
\nFor more information on sidecar files, see the Extensible Metadata Platform and Sidecar file\n Wikipedia pages.
\n " } }, "documentation": "\nOutput
object lists information about the first output. This duplicates\n the information that is listed for the first output in the Outputs
\n object.
To get information about all of the jobs associated with the current AWS account that\n have a given status, specify the following status: Submitted
,\n Progressing
, Complete
, Canceled
, or\n Error
.
The duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.
\nIf you specify a value longer than the duration of the input file , Elastic Transcoder transcodes \n the file and returns a warning message.
\n " + "documentation": "\nThe duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.
\nIf you specify a value longer than the duration of the input file, Elastic Transcoder transcodes \n the file and returns a warning message.
\n " } }, "documentation": "\nSettings that determine when a clip begins and how long it lasts.
\n " @@ -3218,7 +3693,85 @@ }, "documentation": "\nSettings for one clip in a composition. All jobs in a playlist must have the same clip settings.
\n " }, - "documentation": "\nYou can create an output file that contains an excerpt from the input file. This excerpt, called a clip, can come from the beginning, middle, or end of the file. The Composition object contains settings for the clips that make up an output file. For the current release, you can only specify settings for a single clip per output file. The Composition object cannot be null.
\n " + "documentation": "\nYou can create an output file that contains an excerpt from the input file. This \n excerpt, called a clip, can come from the beginning, middle, or end of the file. \n The Composition object contains settings for the clips that make up an output \n file. For the current release, you can only specify settings for a single clip \n per output file. The Composition object cannot be null.
\n " + }, + "Captions": { + "shape_name": "Captions", + "type": "structure", + "members": { + "MergePolicy": { + "shape_name": "CaptionMergePolicy", + "type": "string", + "pattern": "(^MergeOverride$)|(^MergeRetain$)|(^Override$)", + "documentation": "\nA policy that determines how Elastic Transcoder handles the existence of multiple captions.
\nMergeOverride: Elastic Transcoder transcodes both embedded\n and sidecar captions into outputs. If captions for a language are embedded in the input\n file and also appear in a sidecar file, Elastic Transcoder uses the sidecar captions and ignores the\n embedded captions for that language.
MergeRetain: Elastic Transcoder transcodes both embedded\n and sidecar captions into outputs. If captions for a language are embedded in the input\n file and also appear in a sidecar file, Elastic Transcoder uses the embedded captions and ignores the\n sidecar captions for that language. If
Override: Elastic Transcoder transcodes only the\n sidecar captions that you specify in CaptionSources
.
MergePolicy
cannot be null.
The name of the sidecar caption file that you want Elastic Transcoder to include in the\n output file.
\n " + }, + "Language": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\nA string that specifies the language of the caption. Specify this as one of:
\n2-character ISO 639-1 code
3-character ISO 639-2 code
For more information on ISO language codes and language names, see the List of ISO \n 639-1 codes.
\n " + }, + "TimeOffset": { + "shape_name": "TimeOffset", + "type": "string", + "pattern": "(^[+-]?\\d{1,5}(\\.\\d{0,3})?$)|(^[+-]?([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", + "documentation": "\nFor clip generation or captions that do not start at the same time\n as the associated video file, the TimeOffset
tells Elastic Transcoder how much of the video to encode\n before including captions.
Specify the TimeOffset in the form [+-]SS.sss or [+-]HH:mm:SS.ss.
\n " + }, + "Label": { + "shape_name": "Name", + "type": "string", + "min_length": 1, + "max_length": 40, + "documentation": "\nThe label of the caption shown in the player when choosing\n a language. We recommend that you put the caption language name here, \n in the language of the captions.
\n " + } + }, + "documentation": "\nA source file for the input sidecar captions used during the transcoding\n process.
\n " + }, + "max_length": 20, + "documentation": "\nSource files for the input sidecar captions used during the transcoding\n process. To omit all sidecar captions, leave CaptionSources
blank.
The format you specify determines whether Elastic Transcoder generates an\n embedded or sidecar caption for this output.
\nValid Embedded Caption Formats:
\nFor MP3: None
For MP4: mov-text
For MPEG-TS: None
For ogg: None
For webm: None
Valid Sidecar Caption Formats: Elastic Transcoder\n supports dfxp (first div element only), scc, srt, and webvtt. There are\n no container restrictions on sidecar captions. If you want ttml or smpte-tt\n compatible captions, specify dfxp as your output format.
\nThe prefix for caption filenames, in the form description-{language}
, where:
{language}
is a literal value that Elastic Transcoder replaces with the two- or three-letter\n code for the language of the caption in the output file names.If you don't include {language}
in the file name pattern, Elastic Transcoder automatically\n appends \"{language}
\" to the value that you specify for the description. In addition,\n Elastic Transcoder automatically appends the count to the end of the segment files.
For example, suppose you're transcoding into srt format. When you enter\n \"Sydney-{language}-sunrise\", and the language of the captions is English\n (en), the name of the first caption file will be Sydney-en-sunrise00000.srt.
\n " + } + }, + "documentation": "\nThe file format of the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.
\n " + }, + "max_length": 4, + "documentation": "\nThe array of file formats for the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.
\n " + } + }, + "documentation": "\nYou can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another.\n All captions must be in UTF-8. Elastic Transcoder supports two types of captions:
\nEmbedded: Embedded captions\n are included in the same file as the audio and video. Elastic Transcoder supports\n only one embedded caption per language, to a maximum of 300 embedded captions per file.
\nValid input values include: CEA-608 (EIA-608
, first non-empty channel only),\n CEA-708 (EIA-708
, first non-empty channel only), and mov-text
Valid outputs include: mov-text
Elastic Transcoder supports a maximum of one embedded format per output.
\nSidecar: Sidecar captions are kept in a\n separate metadata file from the audio and video data. Sidecar captions require a player\n that is capable of understanding the relationship between the video file and the sidecar\n file. Elastic Transcoder supports only one sidecar caption per language, to a maximum of 20 sidecar\n captions per file.
\nValid input values include: dfxp
(first div element only),\n ebu-tt
, scc
, smpt
, srt
,\n ttml
(first div element only), and webvtt
Valid outputs include: dfxp
(first div element only), scc
,\n srt
, and webvtt
.
If you want ttml or smpte-tt compatible captions, specify dfxp as your output format.
\nElastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures\n as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does\n not preserve text formatting (for example, italics) during the transcoding process.
\nTo remove captions or leave the captions empty, set Captions
to null. To pass through\n existing captions unchanged, set the MergePolicy
to MergeRetain
,\n and pass in a null CaptionSources
array.
For more information on embedded files, see the Subtitles Wikipedia page.
\nFor more information on sidecar files, see the Extensible Metadata Platform and Sidecar file\n Wikipedia pages.
\n " } }, "documentation": "\nIf you specified one output for a job, information about that output. If you specified\n multiple outputs for a job, the Output object lists information about the first output.\n This duplicates the information that is listed for the first output in the Outputs\n object.
\nThe duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.
\nIf you specify a value longer than the duration of the input file , Elastic Transcoder transcodes \n the file and returns a warning message.
\n " + "documentation": "\nThe duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.
\nIf you specify a value longer than the duration of the input file, Elastic Transcoder transcodes \n the file and returns a warning message.
\n " } }, "documentation": "\nSettings that determine when a clip begins and how long it lasts.
\n " @@ -3413,7 +3966,85 @@ }, "documentation": "\nSettings for one clip in a composition. All jobs in a playlist must have the same clip settings.
\n " }, - "documentation": "\nYou can create an output file that contains an excerpt from the input file. This excerpt, called a clip, can come from the beginning, middle, or end of the file. The Composition object contains settings for the clips that make up an output file. For the current release, you can only specify settings for a single clip per output file. The Composition object cannot be null.
\n " + "documentation": "\nYou can create an output file that contains an excerpt from the input file. This \n excerpt, called a clip, can come from the beginning, middle, or end of the file. \n The Composition object contains settings for the clips that make up an output \n file. For the current release, you can only specify settings for a single clip \n per output file. The Composition object cannot be null.
\n " + }, + "Captions": { + "shape_name": "Captions", + "type": "structure", + "members": { + "MergePolicy": { + "shape_name": "CaptionMergePolicy", + "type": "string", + "pattern": "(^MergeOverride$)|(^MergeRetain$)|(^Override$)", + "documentation": "\nA policy that determines how Elastic Transcoder handles the existence of multiple captions.
\nMergeOverride: Elastic Transcoder transcodes both embedded\n and sidecar captions into outputs. If captions for a language are embedded in the input\n file and also appear in a sidecar file, Elastic Transcoder uses the sidecar captions and ignores the\n embedded captions for that language.
MergeRetain: Elastic Transcoder transcodes both embedded\n and sidecar captions into outputs. If captions for a language are embedded in the input\n file and also appear in a sidecar file, Elastic Transcoder uses the embedded captions and ignores the\n sidecar captions for that language. If
Override: Elastic Transcoder transcodes only the\n sidecar captions that you specify in CaptionSources
.
MergePolicy
cannot be null.
The name of the sidecar caption file that you want Elastic Transcoder to include in the\n output file.
\n " + }, + "Language": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\nA string that specifies the language of the caption. Specify this as one of:
\n2-character ISO 639-1 code
3-character ISO 639-2 code
For more information on ISO language codes and language names, see the List of ISO \n 639-1 codes.
\n " + }, + "TimeOffset": { + "shape_name": "TimeOffset", + "type": "string", + "pattern": "(^[+-]?\\d{1,5}(\\.\\d{0,3})?$)|(^[+-]?([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", + "documentation": "\nFor clip generation or captions that do not start at the same time\n as the associated video file, the TimeOffset
tells Elastic Transcoder how much of the video to encode\n before including captions.
Specify the TimeOffset in the form [+-]SS.sss or [+-]HH:mm:SS.ss.
\n " + }, + "Label": { + "shape_name": "Name", + "type": "string", + "min_length": 1, + "max_length": 40, + "documentation": "\nThe label of the caption shown in the player when choosing\n a language. We recommend that you put the caption language name here, \n in the language of the captions.
\n " + } + }, + "documentation": "\nA source file for the input sidecar captions used during the transcoding\n process.
\n " + }, + "max_length": 20, + "documentation": "\nSource files for the input sidecar captions used during the transcoding\n process. To omit all sidecar captions, leave CaptionSources
blank.
The format you specify determines whether Elastic Transcoder generates an\n embedded or sidecar caption for this output.
\nValid Embedded Caption Formats:
\nFor MP3: None
For MP4: mov-text
For MPEG-TS: None
For ogg: None
For webm: None
Valid Sidecar Caption Formats: Elastic Transcoder\n supports dfxp (first div element only), scc, srt, and webvtt. There are\n no container restrictions on sidecar captions. If you want ttml or smpte-tt\n compatible captions, specify dfxp as your output format.
\nThe prefix for caption filenames, in the form description-{language}
, where:
{language}
is a literal value that Elastic Transcoder replaces with the two- or three-letter\n code for the language of the caption in the output file names.If you don't include {language}
in the file name pattern, Elastic Transcoder automatically\n appends \"{language}
\" to the value that you specify for the description. In addition,\n Elastic Transcoder automatically appends the count to the end of the segment files.
For example, suppose you're transcoding into srt format. When you enter\n \"Sydney-{language}-sunrise\", and the language of the captions is English\n (en), the name of the first caption file will be Sydney-en-sunrise00000.srt.
\n " + } + }, + "documentation": "\nThe file format of the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.
\n " + }, + "max_length": 4, + "documentation": "\nThe array of file formats for the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.
\n " + } + }, + "documentation": "\nYou can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another.\n All captions must be in UTF-8. Elastic Transcoder supports two types of captions:
\nEmbedded: Embedded captions\n are included in the same file as the audio and video. Elastic Transcoder supports\n only one embedded caption per language, to a maximum of 300 embedded captions per file.
\nValid input values include: CEA-608 (EIA-608
, first non-empty channel only),\n CEA-708 (EIA-708
, first non-empty channel only), and mov-text
Valid outputs include: mov-text
Elastic Transcoder supports a maximum of one embedded format per output.
\nSidecar: Sidecar captions are kept in a\n separate metadata file from the audio and video data. Sidecar captions require a player\n that is capable of understanding the relationship between the video file and the sidecar\n file. Elastic Transcoder supports only one sidecar caption per language, to a maximum of 20 sidecar\n captions per file.
\nValid input values include: dfxp
(first div element only),\n ebu-tt
, scc
, smpt
, srt
,\n ttml
(first div element only), and webvtt
Valid outputs include: dfxp
(first div element only), scc
,\n srt
, and webvtt
.
If you want ttml or smpte-tt compatible captions, specify dfxp as your output format.
\nElastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures\n as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does\n not preserve text formatting (for example, italics) during the transcoding process.
\nTo remove captions or leave the captions empty, set Captions
to null. To pass through\n existing captions unchanged, set the MergePolicy
to MergeRetain
,\n and pass in a null CaptionSources
array.
For more information on embedded files, see the Subtitles Wikipedia page.
\nFor more information on sidecar files, see the Extensible Metadata Platform and Sidecar file\n Wikipedia pages.
\n " } }, "documentation": "\nOutput
object lists information about the first output. This duplicates\n the information that is listed for the first output in the Outputs
\n object.
If you specified AAC
for Audio:Codec
, choose the AAC profile for the output file.\n Elastic Transcoder supports the following profiles:
auto
: If you specify auto
, Elastic Transcoder will select\n the profile based on the bit rate selected for the output file.AAC-LC
: The most common AAC profile. Use for bitrates larger than\n 64 kbps.HE-AAC
: Not supported on some older players and devices.\n Use for bitrates between 40 and 80 kbps.HE-AACv2
: Not supported on some players and devices.\n Use for bitrates less than 48 kbps.If you created any presets before AAC profiles were added, Elastic Transcoder automatically updated\n your presets to use AAC-LC. You can change the value as required.
You can only choose an audio profile when you specify AAC for the value of Audio:Codec.
\nSpecify the AAC profile for the output file. Elastic Transcoder supports the following profiles:
\nauto
: If you specify auto
, Elastic Transcoder will select\n the profile based on the bit rate selected for the output file.AAC-LC
: The most common AAC profile. Use for bitrates larger than\n 64 kbps.HE-AAC
: Not supported on some older players and devices.\n Use for bitrates between 40 and 80 kbps.HE-AACv2
: Not supported on some players and devices.\n Use for bitrates less than 48 kbps.If you created any presets before AAC profiles were added, Elastic Transcoder automatically updated\n your presets to use AAC-LC. You can change the value as required.
If you specified AAC
for Audio:Codec
, this is the AAC
\n compression profile to use. Valid values include:
auto
, AAC-LC
, HE-AAC
, HE-AACv2
If you specify auto
, Elastic Transcoder chooses a profile based on the bit rate of the output file.
The maximum width of the watermark in one of the following formats:
MaxWidth
.Target
to specify whether you want Elastic Transcoder to include the black\n bars that are added by Elastic Transcoder, if any, in the calculation.MaxWidth
.The maximum height of the watermark in one of the following formats:
MaxHeight
.Target
to specify whether you want Elastic Transcoder to include the black\n bars that are added by Elastic Transcoder, if any, in the calculation.MaxHeight
.\n "
},
"SizingPolicy": {
@@ -4067,7 +4698,7 @@
"HorizontalOffset": {
"shape_name": "PixelsOrPercent",
"type": "string",
- "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{2,4}?px$)",
+ "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{1,4}?px$)",
"documentation": "\n The amount by which you want the horizontal position of the watermark to be offset from\n the position specified by HorizontalAlign:
HorizontalAlign
and 5px for\n HorizontalOffset
, the left side of the watermark appears 5 pixels from\n the left border of the output video.\n HorizontalOffset
is only valid when the value of\n HorizontalAlign
is Left
or Right
. If you\n specify an offset that causes the watermark to extend beyond the left or right border\n and Elastic Transcoder has not added black bars, the watermark is cropped. If Elastic\n Transcoder has added black bars, the watermark extends into the black bars. If the\n watermark extends beyond the black bars, it is cropped.
Use the value of Target
to specify whether you want to include the black\n bars that are added by Elastic Transcoder, if any, in the offset calculation.
VerticalOffset
\n The amount by which you want the vertical position of the watermark to be offset from the\n position specified by VerticalAlign:
MaxHeight
.Top
for VerticalAlign
and\n 5px
for VerticalOffset
, the top of the watermark appears 5\n pixels from the top border of the output video.\n VerticalOffset
is only valid when the value of VerticalAlign is Top or\n Bottom.
If you specify an offset that causes the watermark to extend beyond the top or bottom\n border and Elastic Transcoder has not added black bars, the watermark is cropped. If\n Elastic Transcoder has added black bars, the watermark extends into the black bars. If\n the watermark extends beyond the black bars, it is cropped.
\n\nUse the value of Target
to specify whether you want Elastic Transcoder to\n include the black bars that are added by Elastic Transcoder, if any, in the offset\n calculation.
The identifier of the job for which you want to get detailed information.
\n ", - "location": "uri", - "required": true + "required": true, + "location": "uri" } }, "documentation": "\nThe ReadJobRequest
structure.
The duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.
\nIf you specify a value longer than the duration of the input file , Elastic Transcoder transcodes \n the file and returns a warning message.
\n " + "documentation": "\nThe duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.
\nIf you specify a value longer than the duration of the input file, Elastic Transcoder transcodes \n the file and returns a warning message.
\n " } }, "documentation": "\nSettings that determine when a clip begins and how long it lasts.
\n " @@ -4488,7 +5119,85 @@ }, "documentation": "\nSettings for one clip in a composition. All jobs in a playlist must have the same clip settings.
\n " }, - "documentation": "\nYou can create an output file that contains an excerpt from the input file. This excerpt, called a clip, can come from the beginning, middle, or end of the file. The Composition object contains settings for the clips that make up an output file. For the current release, you can only specify settings for a single clip per output file. The Composition object cannot be null.
\n " + "documentation": "\nYou can create an output file that contains an excerpt from the input file. This \n excerpt, called a clip, can come from the beginning, middle, or end of the file. \n The Composition object contains settings for the clips that make up an output \n file. For the current release, you can only specify settings for a single clip \n per output file. The Composition object cannot be null.
\n " + }, + "Captions": { + "shape_name": "Captions", + "type": "structure", + "members": { + "MergePolicy": { + "shape_name": "CaptionMergePolicy", + "type": "string", + "pattern": "(^MergeOverride$)|(^MergeRetain$)|(^Override$)", + "documentation": "\nA policy that determines how Elastic Transcoder handles the existence of multiple captions.
\nMergeOverride: Elastic Transcoder transcodes both embedded\n and sidecar captions into outputs. If captions for a language are embedded in the input\n file and also appear in a sidecar file, Elastic Transcoder uses the sidecar captions and ignores the\n embedded captions for that language.
MergeRetain: Elastic Transcoder transcodes both embedded\n and sidecar captions into outputs. If captions for a language are embedded in the input\n file and also appear in a sidecar file, Elastic Transcoder uses the embedded captions and ignores the\n sidecar captions for that language. If
Override: Elastic Transcoder transcodes only the\n sidecar captions that you specify in CaptionSources
.
MergePolicy
cannot be null.
The name of the sidecar caption file that you want Elastic Transcoder to include in the\n output file.
\n " + }, + "Language": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\nA string that specifies the language of the caption. Specify this as one of:
\n2-character ISO 639-1 code
3-character ISO 639-2 code
For more information on ISO language codes and language names, see the List of ISO \n 639-1 codes.
\n " + }, + "TimeOffset": { + "shape_name": "TimeOffset", + "type": "string", + "pattern": "(^[+-]?\\d{1,5}(\\.\\d{0,3})?$)|(^[+-]?([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", + "documentation": "\nFor clip generation or captions that do not start at the same time\n as the associated video file, the TimeOffset
tells Elastic Transcoder how much of the video to encode\n before including captions.
Specify the TimeOffset in the form [+-]SS.sss or [+-]HH:mm:SS.ss.
\n " + }, + "Label": { + "shape_name": "Name", + "type": "string", + "min_length": 1, + "max_length": 40, + "documentation": "\nThe label of the caption shown in the player when choosing\n a language. We recommend that you put the caption language name here, \n in the language of the captions.
\n " + } + }, + "documentation": "\nA source file for the input sidecar captions used during the transcoding\n process.
\n " + }, + "max_length": 20, + "documentation": "\nSource files for the input sidecar captions used during the transcoding\n process. To omit all sidecar captions, leave CaptionSources
blank.
The format you specify determines whether Elastic Transcoder generates an\n embedded or sidecar caption for this output.
\nValid Embedded Caption Formats:
\nFor MP3: None
For MP4: mov-text
For MPEG-TS: None
For ogg: None
For webm: None
Valid Sidecar Caption Formats: Elastic Transcoder\n supports dfxp (first div element only), scc, srt, and webvtt. There are\n no container restrictions on sidecar captions. If you want ttml or smpte-tt\n compatible captions, specify dfxp as your output format.
\nThe prefix for caption filenames, in the form description-{language}
, where:
{language}
is a literal value that Elastic Transcoder replaces with the two- or three-letter\n code for the language of the caption in the output file names.If you don't include {language}
in the file name pattern, Elastic Transcoder automatically\n appends \"{language}
\" to the value that you specify for the description. In addition,\n Elastic Transcoder automatically appends the count to the end of the segment files.
For example, suppose you're transcoding into srt format. When you enter\n \"Sydney-{language}-sunrise\", and the language of the captions is English\n (en), the name of the first caption file will be Sydney-en-sunrise00000.srt.
\n " + } + }, + "documentation": "\nThe file format of the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.
\n " + }, + "max_length": 4, + "documentation": "\nThe array of file formats for the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.
\n " + } + }, + "documentation": "\nYou can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another.\n All captions must be in UTF-8. Elastic Transcoder supports two types of captions:
\nEmbedded: Embedded captions\n are included in the same file as the audio and video. Elastic Transcoder supports\n only one embedded caption per language, to a maximum of 300 embedded captions per file.
\nValid input values include: CEA-608 (EIA-608
, first non-empty channel only),\n CEA-708 (EIA-708
, first non-empty channel only), and mov-text
Valid outputs include: mov-text
Elastic Transcoder supports a maximum of one embedded format per output.
\nSidecar: Sidecar captions are kept in a\n separate metadata file from the audio and video data. Sidecar captions require a player\n that is capable of understanding the relationship between the video file and the sidecar\n file. Elastic Transcoder supports only one sidecar caption per language, to a maximum of 20 sidecar\n captions per file.
\nValid input values include: dfxp
(first div element only),\n ebu-tt
, scc
, smpt
, srt
,\n ttml
(first div element only), and webvtt
Valid outputs include: dfxp
(first div element only), scc
,\n srt
, and webvtt
.
If you want ttml or smpte-tt compatible captions, specify dfxp as your output format.
\nElastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures\n as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does\n not preserve text formatting (for example, italics) during the transcoding process.
\nTo remove captions or leave the captions empty, set Captions
to null. To pass through\n existing captions unchanged, set the MergePolicy
to MergeRetain
,\n and pass in a null CaptionSources
array.
For more information on embedded files, see the Subtitles Wikipedia page.
\nFor more information on sidecar files, see the Extensible Metadata Platform and Sidecar file\n Wikipedia pages.
\n " } }, "documentation": "\nIf you specified one output for a job, information about that output. If you specified\n multiple outputs for a job, the Output object lists information about the first output.\n This duplicates the information that is listed for the first output in the Outputs\n object.
\nThe duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.
\nIf you specify a value longer than the duration of the input file , Elastic Transcoder transcodes \n the file and returns a warning message.
\n " + "documentation": "\nThe duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.
\nIf you specify a value longer than the duration of the input file, Elastic Transcoder transcodes \n the file and returns a warning message.
\n " } }, "documentation": "\nSettings that determine when a clip begins and how long it lasts.
\n " @@ -4683,7 +5392,85 @@ }, "documentation": "\nSettings for one clip in a composition. All jobs in a playlist must have the same clip settings.
\n " }, - "documentation": "\nYou can create an output file that contains an excerpt from the input file. This excerpt, called a clip, can come from the beginning, middle, or end of the file. The Composition object contains settings for the clips that make up an output file. For the current release, you can only specify settings for a single clip per output file. The Composition object cannot be null.
\n " + "documentation": "\nYou can create an output file that contains an excerpt from the input file. This \n excerpt, called a clip, can come from the beginning, middle, or end of the file. \n The Composition object contains settings for the clips that make up an output \n file. For the current release, you can only specify settings for a single clip \n per output file. The Composition object cannot be null.
\n " + }, + "Captions": { + "shape_name": "Captions", + "type": "structure", + "members": { + "MergePolicy": { + "shape_name": "CaptionMergePolicy", + "type": "string", + "pattern": "(^MergeOverride$)|(^MergeRetain$)|(^Override$)", + "documentation": "\nA policy that determines how Elastic Transcoder handles the existence of multiple captions.
\nMergeOverride: Elastic Transcoder transcodes both embedded\n and sidecar captions into outputs. If captions for a language are embedded in the input\n file and also appear in a sidecar file, Elastic Transcoder uses the sidecar captions and ignores the\n embedded captions for that language.
MergeRetain: Elastic Transcoder transcodes both embedded\n and sidecar captions into outputs. If captions for a language are embedded in the input\n file and also appear in a sidecar file, Elastic Transcoder uses the embedded captions and ignores the\n sidecar captions for that language. If
Override: Elastic Transcoder transcodes only the\n sidecar captions that you specify in CaptionSources
.
MergePolicy
cannot be null.
The name of the sidecar caption file that you want Elastic Transcoder to include in the\n output file.
\n " + }, + "Language": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\nA string that specifies the language of the caption. Specify this as one of:
\n2-character ISO 639-1 code
3-character ISO 639-2 code
For more information on ISO language codes and language names, see the List of ISO \n 639-1 codes.
\n " + }, + "TimeOffset": { + "shape_name": "TimeOffset", + "type": "string", + "pattern": "(^[+-]?\\d{1,5}(\\.\\d{0,3})?$)|(^[+-]?([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", + "documentation": "\nFor clip generation or captions that do not start at the same time\n as the associated video file, the TimeOffset
tells Elastic Transcoder how much of the video to encode\n before including captions.
Specify the TimeOffset in the form [+-]SS.sss or [+-]HH:mm:SS.ss.
\n " + }, + "Label": { + "shape_name": "Name", + "type": "string", + "min_length": 1, + "max_length": 40, + "documentation": "\nThe label of the caption shown in the player when choosing\n a language. We recommend that you put the caption language name here, \n in the language of the captions.
\n " + } + }, + "documentation": "\nA source file for the input sidecar captions used during the transcoding\n process.
\n " + }, + "max_length": 20, + "documentation": "\nSource files for the input sidecar captions used during the transcoding\n process. To omit all sidecar captions, leave CaptionSources
blank.
The format you specify determines whether Elastic Transcoder generates an\n embedded or sidecar caption for this output.
\nValid Embedded Caption Formats:
\nFor MP3: None
For MP4: mov-text
For MPEG-TS: None
For ogg: None
For webm: None
Valid Sidecar Caption Formats: Elastic Transcoder\n supports dfxp (first div element only), scc, srt, and webvtt. There are\n no container restrictions on sidecar captions. If you want ttml or smpte-tt\n compatible captions, specify dfxp as your output format.
\nThe prefix for caption filenames, in the form description-{language}
, where:
{language}
is a literal value that Elastic Transcoder replaces with the two- or three-letter\n code for the language of the caption in the output file names.If you don't include {language}
in the file name pattern, Elastic Transcoder automatically\n appends \"{language}
\" to the value that you specify for the description. In addition,\n Elastic Transcoder automatically appends the count to the end of the segment files.
For example, suppose you're transcoding into srt format. When you enter\n \"Sydney-{language}-sunrise\", and the language of the captions is English\n (en), the name of the first caption file will be Sydney-en-sunrise00000.srt.
\n " + } + }, + "documentation": "\nThe file format of the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.
\n " + }, + "max_length": 4, + "documentation": "\nThe array of file formats for the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.
\n " + } + }, + "documentation": "\nYou can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another.\n All captions must be in UTF-8. Elastic Transcoder supports two types of captions:
\nEmbedded: Embedded captions\n are included in the same file as the audio and video. Elastic Transcoder supports\n only one embedded caption per language, to a maximum of 300 embedded captions per file.
\nValid input values include: CEA-608 (EIA-608
, first non-empty channel only),\n CEA-708 (EIA-708
, first non-empty channel only), and mov-text
Valid outputs include: mov-text
Elastic Transcoder supports a maximum of one embedded format per output.
\nSidecar: Sidecar captions are kept in a\n separate metadata file from the audio and video data. Sidecar captions require a player\n that is capable of understanding the relationship between the video file and the sidecar\n file. Elastic Transcoder supports only one sidecar caption per language, to a maximum of 20 sidecar\n captions per file.
\nValid input values include: dfxp
(first div element only),\n ebu-tt
, scc
, smpt
, srt
,\n ttml
(first div element only), and webvtt
Valid outputs include: dfxp
(first div element only), scc
,\n srt
, and webvtt
.
If you want ttml or smpte-tt compatible captions, specify dfxp as your output format.
\nElastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures\n as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does\n not preserve text formatting (for example, italics) during the transcoding process.
\nTo remove captions or leave the captions empty, set Captions
to null. To pass through\n existing captions unchanged, set the MergePolicy
to MergeRetain
,\n and pass in a null CaptionSources
array.
For more information on embedded files, see the Subtitles Wikipedia page.
\nFor more information on sidecar files, see the Extensible Metadata Platform and Sidecar file\n Wikipedia pages.
\n " } }, "documentation": "\nOutput
object lists information about the first output. This duplicates\n the information that is listed for the first output in the Outputs
\n object.
The identifier of the pipeline to read.
\n ", - "location": "uri", - "required": true + "required": true, + "location": "uri" } }, "documentation": "\nThe ReadPipelineRequest
structure.
The identifier of the preset for which you want to get detailed information.
\n ", - "location": "uri", - "required": true + "required": true, + "location": "uri" } }, "documentation": "\nThe ReadPresetRequest
structure.
If you specified AAC
for Audio:Codec
, choose the AAC profile for the output file.\n Elastic Transcoder supports the following profiles:
auto
: If you specify auto
, Elastic Transcoder will select\n the profile based on the bit rate selected for the output file.AAC-LC
: The most common AAC profile. Use for bitrates larger than\n 64 kbps.HE-AAC
: Not supported on some older players and devices.\n Use for bitrates between 40 and 80 kbps.HE-AACv2
: Not supported on some players and devices.\n Use for bitrates less than 48 kbps.If you created any presets before AAC profiles were added, Elastic Transcoder automatically updated\n your presets to use AAC-LC. You can change the value as required.
You can only choose an audio profile when you specify AAC for the value of Audio:Codec.
\nSpecify the AAC profile for the output file. Elastic Transcoder supports the following profiles:
\nauto
: If you specify auto
, Elastic Transcoder will select\n the profile based on the bit rate selected for the output file.AAC-LC
: The most common AAC profile. Use for bitrates larger than\n 64 kbps.HE-AAC
: Not supported on some older players and devices.\n Use for bitrates between 40 and 80 kbps.HE-AACv2
: Not supported on some players and devices.\n Use for bitrates less than 48 kbps.If you created any presets before AAC profiles were added, Elastic Transcoder automatically updated\n your presets to use AAC-LC. You can change the value as required.
If you specified AAC
for Audio:Codec
, this is the AAC
\n compression profile to use. Valid values include:
auto
, AAC-LC
, HE-AAC
, HE-AACv2
If you specify auto
, Elastic Transcoder chooses a profile based on the bit rate of the output file.
The maximum width of the watermark in one of the following formats:
MaxWidth
.Target
to specify whether you want Elastic Transcoder to include the black\n bars that are added by Elastic Transcoder, if any, in the calculation.MaxWidth
.The maximum height of the watermark in one of the following formats:
MaxHeight
.Target
to specify whether you want Elastic Transcoder to include the black\n bars that are added by Elastic Transcoder, if any, in the calculation.MaxHeight
.\n "
},
"SizingPolicy": {
@@ -5297,7 +6084,7 @@
"HorizontalOffset": {
"shape_name": "PixelsOrPercent",
"type": "string",
- "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{2,4}?px$)",
+ "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{1,4}?px$)",
"documentation": "\n The amount by which you want the horizontal position of the watermark to be offset from\n the position specified by HorizontalAlign:
HorizontalAlign
and 5px for\n HorizontalOffset
, the left side of the watermark appears 5 pixels from\n the left border of the output video.\n HorizontalOffset
is only valid when the value of\n HorizontalAlign
is Left
or Right
. If you\n specify an offset that causes the watermark to extend beyond the left or right border\n and Elastic Transcoder has not added black bars, the watermark is cropped. If Elastic\n Transcoder has added black bars, the watermark extends into the black bars. If the\n watermark extends beyond the black bars, it is cropped.
Use the value of Target
to specify whether you want to include the black\n bars that are added by Elastic Transcoder, if any, in the offset calculation.
VerticalOffset
\n The amount by which you want the vertical position of the watermark to be offset from the\n position specified by VerticalAlign:
MaxHeight
.Top
for VerticalAlign
and\n 5px
for VerticalOffset
, the top of the watermark appears 5\n pixels from the top border of the output video.\n VerticalOffset
is only valid when the value of VerticalAlign is Top or\n Bottom.
If you specify an offset that causes the watermark to extend beyond the top or bottom\n border and Elastic Transcoder has not added black bars, the watermark is cropped. If\n Elastic Transcoder has added black bars, the watermark extends into the black bars. If\n the watermark extends beyond the black bars, it is cropped.
\n\nUse the value of Target
to specify whether you want Elastic Transcoder to\n include the black bars that are added by Elastic Transcoder, if any, in the offset\n calculation.
The IAM Amazon Resource Name (ARN) for the role that you want Elastic Transcoder to\n test.
\n " + "documentation": "\nThe IAM Amazon Resource Name (ARN) for the role that you want Elastic Transcoder to\n test.
\n ", + "required": true }, "InputBucket": { "shape_name": "BucketName", "type": "string", "pattern": "^(\\w|\\.|-){1,255}$", - "documentation": "\nThe Amazon S3 bucket that contains media files to be transcoded. The action attempts to read\n from this bucket.
\n " + "documentation": "\nThe Amazon S3 bucket that contains media files to be transcoded. The action attempts to read\n from this bucket.
\n ", + "required": true }, "OutputBucket": { "shape_name": "BucketName", "type": "string", "pattern": "^(\\w|\\.|-){1,255}$", - "documentation": "\nThe Amazon S3 bucket that Elastic Transcoder will write transcoded media files to. The action attempts to\n read from this bucket.
\n " + "documentation": "\nThe Amazon S3 bucket that Elastic Transcoder will write transcoded media files to. The action attempts to\n read from this bucket.
\n ", + "required": true }, "Topics": { "shape_name": "SnsTopics", @@ -5472,7 +6262,8 @@ "documentation": null }, "max_length": 30, - "documentation": "\nThe ARNs of one or more Amazon Simple Notification Service (Amazon SNS) topics that you want the action to send a test\n notification to.
\n " + "documentation": "\nThe ARNs of one or more Amazon Simple Notification Service (Amazon SNS) topics that you want the action to send a test\n notification to.
\n ", + "required": true } }, "documentation": "\n The TestRoleRequest
structure.
The identifier of the pipeline for which you want to change notification settings.
\n ", - "location": "uri", - "required": true + "required": true, + "location": "uri" }, "Notifications": { "shape_name": "Notifications", @@ -6009,7 +6800,8 @@ "documentation": "\nThe Amazon SNS topic that you want to notify when Elastic Transcoder encounters an error condition.
\n " } }, - "documentation": "\nThe topic ARN for the Amazon Simple Notification Service (Amazon SNS) topic that you want to notify to report job\n status.
\nThe topic ARN for the Amazon Simple Notification Service (Amazon SNS) topic that you want to notify to report job\n status.
\nThe UpdatePipelineNotificationsRequest
structure.
The identifier of the pipeline to update.
\n ", - "location": "uri", - "required": true + "required": true, + "location": "uri" }, "Status": { "shape_name": "PipelineStatus", "type": "string", "pattern": "(^Active$)|(^Paused$)", - "documentation": "\nThe desired status of the pipeline:
\nActive
: The pipeline is processing jobs.Paused
: The pipeline is not currently processing jobs.The desired status of the pipeline:
\nActive
: The pipeline is processing jobs.Paused
: The pipeline is not currently processing jobs.The UpdatePipelineStatusRequest
structure.
This is the Amazon Elastic MapReduce API Reference. This guide provides descriptions and\n samples of the Amazon Elastic MapReduce APIs.
\n\nAmazon Elastic MapReduce (Amazon EMR) is a web service that makes it easy to process large amounts of\n data efficiently. Amazon EMR uses Hadoop processing combined with several AWS\n products to do tasks such as web indexing, data mining, log file analysis, machine\n learning, scientific simulation, and data warehousing.
\n\n ", "operations": { "AddInstanceGroups": { @@ -142,7 +143,7 @@ "pattern": "[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*", "min_length": 0, "max_length": 256, - "documentation": "\nA string that uniquely identifies the job flow. This identifier is returned by\n RunJobFlow and can also be obtained from DescribeJobFlows.
\n ", + "documentation": "\nA string that uniquely identifies the job flow. This identifier is returned by\n RunJobFlow and can also be obtained from ListClusters.
\n ", "required": true }, "Steps": { @@ -241,7 +242,7 @@ }, "documentation": "\nSpecification of a job flow step.
\n " }, - "documentation": "\nA list of StepConfig to be executed by the job flow.
\n ", + "documentation": "\nA list of StepConfig to be executed by the job flow.
\n ", "required": true } }, @@ -265,7 +266,7 @@ "documentation": "\nThe identifiers of the list of steps added to the job flow.
\n " } }, - "documentation": "\nThe output for the AddJobFlowSteps operation.
\n " + "documentation": "\nThe output for the AddJobFlowSteps operation.
\n " }, "errors": [ { @@ -286,7 +287,8 @@ "ResourceId": { "shape_name": "ResourceId", "type": "string", - "documentation": "\nThe Amazon EMR resource identifier to which tags will be added. This value must be a cluster identifier.
\n " + "documentation": "\nThe Amazon EMR resource identifier to which tags will be added. This value must be a cluster identifier.
\n ", + "required": true }, "Tags": { "shape_name": "TagList", @@ -298,26 +300,27 @@ "Key": { "shape_name": "String", "type": "string", - "documentation": "\nA user-defined key, which is the minimum required information for a valid tag.\n For more information, see Tagging Amazon EMR Resources. \n
\n " + "documentation": "\nA user-defined key, which is the minimum required information for a valid tag. \n For more information, see Tagging Amazon EMR Resources. \n
\n " }, "Value": { "shape_name": "String", "type": "string", - "documentation": "\nA user-defined value, which is optional in a tag.\n For more information, see Tagging Amazon EMR Resources. \n
\n " + "documentation": "\nA user-defined value, which is optional in a tag. \n For more information, see Tagging Amazon EMR Resources. \n
\n " } }, - "documentation": "\nA key/value pair that contains user-defined metadata that you can associate with an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. \n For more information, see Tagging Amazon EMR Resources. \n
\n " + "documentation": "\nA key/value pair containing user-defined metadata that you can associate with an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clu\\\nsters to track your Amazon EMR resource allocation costs. \n For more information, see Tagging Amazon EMR Resources. \n
\n " }, - "documentation": "\nA list of tags to associate with a cluster and propagate to Amazon EC2 instances. Tags are user-defined key/value pairs that consist of a required key string with a maximum of 128 characters, and an optional value string with a maximum of 256 characters.
\n " + "documentation": "\nA list of tags to associate with a cluster and propagate to Amazon EC2 instances. Tags are user-defined key/value pairs that consist of a required key string with a maximum of 128 characters, and an optional value string with a maximum of 256 characters.
\n ", + "required": true } }, - "documentation": "\nThis input identifies a cluster and a list of tags to attach.\n
\n " + "documentation": "\nThis input identifies a cluster and a list of tags to attach. \n
\n " }, "output": { "shape_name": "AddTagsOutput", "type": "structure", "members": {}, - "documentation": "\nThis output indicates the result of adding tags to a resource. \n
\n " + "documentation": "\nThis output indicates the result of adding tags to a resource. \n
\n " }, "errors": [ { @@ -327,10 +330,10 @@ "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\nThe message associated with the exception.
\n \n " + "documentation": "\nThe message associated with the exception.
\n\n " } }, - "documentation": "\nThis exception occurs when there is an internal failure in the EMR service.
\n \n " + "documentation": "\nThis exception occurs when there is an internal failure in the EMR service.
\n\n " }, { "shape_name": "InvalidRequestException", @@ -341,18 +344,18 @@ "type": "string", "min_length": 1, "max_length": 256, - "documentation": "\nThe error code associated with the exception.
\n \n " + "documentation": "\nThe error code associated with the exception.
\n\n " }, "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\nThe message associated with the exception.
\n \n " + "documentation": "\nThe message associated with the exception.
\n\n " } }, - "documentation": "\nThis exception occurs when there is something wrong with user input.
\n \n " + "documentation": "\nThis exception occurs when there is something wrong with user input.
\n\n " } ], - "documentation": "\nAdds tags to an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. \n For more information, see Tagging Amazon EMR Resources. \n
\n " + "documentation": "\nAdds tags to an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. \n For more information, see Tagging Amazon EMR Resources. \n
\nThe identifier of the cluster to describe.
\n " + "documentation": "\nThe identifier of the cluster to describe.
\n ", + "required": true } }, "documentation": "\nThis input determines which cluster to describe.
\n " @@ -480,7 +484,7 @@ "documentation": "\nThe IAM role that was specified when the job flow was launched. The EC2 instances of the job flow assume this role.
\n " } }, - "documentation": "\nProvides information about the EC2 instances in a cluster grouped by category. For example, EC2 Key Name, Subnet Id, Instance Profile, and so on.
\n " + "documentation": "\nProvides information about the EC2 instances in a cluster grouped by category. For example, key name, subnet ID, IAM instance profile, and so on.
\n " }, "LogUri": { "shape_name": "String", @@ -555,7 +559,7 @@ "documentation": "\nThis option is for advanced users only. This is meta information about third-party applications that third-party vendors use for testing purposes.
" } }, - "documentation": "\nAn application is any Amazon or third-party software that you can add to the cluster. This structure contains a list of strings that indicates the software to use with the cluster and accepts a user argument list. Amazon EMR accepts and forwards the argument list to the corresponding installation\n script as bootstrap action argument. For more information, see Launch a Job Flow on the MapR Distribution for Hadoop. Currently supported values are:
\nAn application is any Amazon or third-party software that you can add to the cluster. This structure contains a list of strings that indicates the software to use with the cluster and accepts a user argument list. Amazon EMR accepts and forwards the argument list to the corresponding installation\n script as bootstrap action argument. For more information, see Launch a Job Flow on the MapR Distribution for Hadoop. Currently supported values are:
\nThe applications installed on this cluster.
\n " }, @@ -569,17 +573,22 @@ "Key": { "shape_name": "String", "type": "string", - "documentation": "\nA user-defined key, which is the minimum required information for a valid tag.\n For more information, see Tagging Amazon EMR Resources. \n
\n " + "documentation": "\nA user-defined key, which is the minimum required information for a valid tag. \n For more information, see Tagging Amazon EMR Resources. \n
\n " }, "Value": { "shape_name": "String", "type": "string", - "documentation": "\nA user-defined value, which is optional in a tag.\n For more information, see Tagging Amazon EMR Resources. \n
\n " + "documentation": "\nA user-defined value, which is optional in a tag. \n For more information, see Tagging Amazon EMR Resources. \n
\n " } }, - "documentation": "\nA key/value pair that contains user-defined metadata that you can associate with an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. \n For more information, see Tagging Amazon EMR Resources. \n
\n " + "documentation": "\nA key/value pair containing user-defined metadata that you can associate with an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clu\\\nsters to track your Amazon EMR resource allocation costs. \n For more information, see Tagging Amazon EMR Resources. \n
\n " }, - "documentation": "\nA list of tags associated with cluster.
" + "documentation": "\nA list of tags associated with a cluster.
" + }, + "ServiceRole": { + "shape_name": "String", + "type": "string", + "documentation": "\nThe IAM role that was specified when the job flow was launched. Amazon ElasticMapReduce will assume this role to work with AWS resources on your behalf.
\n " } }, "documentation": "\nThis output contains the details for the requested cluster.
\n " @@ -595,10 +604,10 @@ "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\nThe message associated with the exception.
\n \n " + "documentation": "\nThe message associated with the exception.
\n\n " } }, - "documentation": "\nThis exception occurs when there is an internal failure in the EMR service.
\n \n " + "documentation": "\nThis exception occurs when there is an internal failure in the EMR service.
\n\n " }, { "shape_name": "InvalidRequestException", @@ -609,15 +618,15 @@ "type": "string", "min_length": 1, "max_length": 256, - "documentation": "\nThe error code associated with the exception.
\n \n " + "documentation": "\nThe error code associated with the exception.
\n\n " }, "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\nThe message associated with the exception.
\n \n " + "documentation": "\nThe message associated with the exception.
\n\n " } }, - "documentation": "\nThis exception occurs when there is something wrong with user input.
\n \n " + "documentation": "\nThis exception occurs when there is something wrong with user input.
\n\n " } ], "documentation": "\nProvides cluster-level details including status, hardware and software configuration, VPC settings, and so on. For information about the cluster steps, see ListSteps.
\n \n " @@ -1236,6 +1245,14 @@ "min_length": 0, "max_length": 10280, "documentation": "\nThe IAM role that was specified when the job flow was launched. The EC2 instances of the job flow assume this role.
\n " + }, + "ServiceRole": { + "shape_name": "XmlString", + "type": "string", + "pattern": "[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*", + "min_length": 0, + "max_length": 10280, + "documentation": "\nThe IAM role that was specified when the job flow was launched. Amazon ElasticMapReduce will assume this role to work with AWS resources on your behalf.
\n " } }, "documentation": "\nA description of a job flow.
\n " @@ -1253,7 +1270,7 @@ "documentation": "\nIndicates that an error occurred while processing the request and that the request was not\n completed.
\n " } ], - "documentation": "\nDescribeJobFlows returns a list of job flows that match all of the supplied parameters.\n The parameters can include a list of job flow IDs, job flow states, and restrictions on job\n flow creation date and time.
\nRegardless of supplied parameters, only job flows created within the last two months are\n returned.
\nIf no parameters are supplied, then job flows matching either of the following criteria\n are returned:
\nRUNNING
, WAITING
, SHUTTING_DOWN
,\n STARTING
\n Amazon Elastic MapReduce can return a maximum of 512 job flow descriptions.
\nThis API is deprecated and will eventually be removed. We recommend you use ListClusters,\n DescribeCluster, ListSteps, ListInstanceGroups and ListBootstrapActions\n instead.
\nDescribeJobFlows returns a list of job flows that match all of the supplied parameters.\n The parameters can include a list of job flow IDs, job flow states, and restrictions on job\n flow creation date and time.
\nRegardless of supplied parameters, only job flows created within the last two months are\n returned.
\nIf no parameters are supplied, then job flows matching either of the following criteria\n are returned:
\nRUNNING
, WAITING
, SHUTTING_DOWN
,\n STARTING
\n Amazon Elastic MapReduce can return a maximum of 512 job flow descriptions.
\nThe identifier of the cluster with steps to describe.
\n " + "documentation": "\nThe identifier of the cluster with steps to describe.
\n ", + "required": true }, "StepId": { "shape_name": "StepId", "type": "string", - "documentation": "\nThe identifier of the step to describe.
\n " + "documentation": "\nThe identifier of the step to describe.
\n ", + "required": true } }, "documentation": "\nThis input determines which step to describe.
\n " @@ -1343,7 +1362,7 @@ "CANCEL_AND_WAIT", "CONTINUE" ], - "documentation": "\nThis specifies what action to take when the cluster step fails. TERMINATE_JOB_FLOW is deprecated, use TERMINATE_CLUSTER instead.\n
\n " + "documentation": "\nThis specifies what action to take when the cluster step fails. Possible values are TERMINATE_CLUSTER, CANCEL_AND_WAIT, and CONTINUE.\n
\n " }, "Status": { "shape_name": "StepStatus", @@ -1394,12 +1413,12 @@ "StartDateTime": { "shape_name": "Date", "type": "timestamp", - "documentation": "\nThe date and time when the cluster step execution started.\n Due to delays in step status reporting, this can display a time which pre-dates a previous call to DescribeStep that indicated the step was not yet running. \n
\n " + "documentation": "\nThe date and time when the cluster step execution started.\n
\n " }, "EndDateTime": { "shape_name": "Date", "type": "timestamp", - "documentation": "\nThe date and time when the cluster step execution completed or failed. \n This can display a time that pre-dates a call to DescribeStep that indicates the step is running, due to delays in step status reporting. \n
\n " + "documentation": "\nThe date and time when the cluster step execution completed or failed.\n
\n " } }, "documentation": "\nThe timeline of the cluster step status over time.\n
\n " @@ -1421,10 +1440,10 @@ "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\nThe message associated with the exception.
\n \n " + "documentation": "\nThe message associated with the exception.
\n\n " } }, - "documentation": "\nThis exception occurs when there is an internal failure in the EMR service.
\n \n " + "documentation": "\nThis exception occurs when there is an internal failure in the EMR service.
\n\n " }, { "shape_name": "InvalidRequestException", @@ -1435,15 +1454,15 @@ "type": "string", "min_length": 1, "max_length": 256, - "documentation": "\nThe error code associated with the exception.
\n \n " + "documentation": "\nThe error code associated with the exception.
\n\n " }, "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\nThe message associated with the exception.
\n \n " + "documentation": "\nThe message associated with the exception.
\n\n " } }, - "documentation": "\nThis exception occurs when there is something wrong with user input.
\n \n " + "documentation": "\nThis exception occurs when there is something wrong with user input.
\n\n " } ], "documentation": "\nProvides more detail about the cluster step.
\n " @@ -1457,12 +1476,13 @@ "ClusterId": { "shape_name": "ClusterId", "type": "string", - "documentation": "\nThe cluster identifier for the bootstrap actions to list.
\n " + "documentation": "\nThe cluster identifier for the bootstrap actions to list\n .
\n ", + "required": true }, "Marker": { "shape_name": "Marker", "type": "string", - "documentation": "\nThe pagination token is a random string indicating whether there are more results to fetch. Provide the pagination token from earlier API calls to retrieve the next page of results. When the value is null, all results have been returned.
\n " + "documentation": "\nThe pagination token that indicates the next set of results to retrieve\n .
\n " } }, "documentation": "\nThis input determines which bootstrap actions to retrieve.
\n " @@ -1501,15 +1521,15 @@ }, "documentation": "\nAn entity describing an executable that runs on a cluster.
\n " }, - "documentation": "\nThe bootstrap actions associated with the cluster.
\n " + "documentation": "\nThe bootstrap actions associated with the cluster\n .
\n " }, "Marker": { "shape_name": "Marker", "type": "string", - "documentation": "\nThe pagination token is a random string indicating whether there are more results to fetch. Use the pagination token in later API calls to retrieve the next page of results. When the value is null, all results have been returned.
\n " + "documentation": "\nThe pagination token that indicates the next set of results to retrieve\n .
\n " } }, - "documentation": "\nThis output contains the bootstrap actions detail.
\n " + "documentation": "\nThis output contains the boostrap actions detail\n .
\n " }, "errors": [ { @@ -1519,10 +1539,10 @@ "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\nThe message associated with the exception.
\n \n " + "documentation": "\nThe message associated with the exception.
\n\n " } }, - "documentation": "\nThis exception occurs when there is an internal failure in the EMR service.
\n \n " + "documentation": "\nThis exception occurs when there is an internal failure in the EMR service.
\n\n " }, { "shape_name": "InvalidRequestException", @@ -1533,15 +1553,15 @@ "type": "string", "min_length": 1, "max_length": 256, - "documentation": "\nThe error code associated with the exception.
\n \n " + "documentation": "\nThe error code associated with the exception.
\n\n " }, "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\nThe message associated with the exception.
\n \n " + "documentation": "\nThe message associated with the exception.
\n\n " } }, - "documentation": "\nThis exception occurs when there is something wrong with user input.
\n \n " + "documentation": "\nThis exception occurs when there is something wrong with user input.
\n\n " } ], "documentation": "\nProvides information about the bootstrap actions associated with a cluster.
\n \n ", @@ -1561,12 +1581,12 @@ "CreatedAfter": { "shape_name": "Date", "type": "timestamp", - "documentation": "\nThe creation date and time beginning value filter for listing clusters.
\n " + "documentation": "\nThe creation date and time beginning value filter for listing clusters\n .
\n " }, "CreatedBefore": { "shape_name": "Date", "type": "timestamp", - "documentation": "\nThe creation date and time end value filter for listing clusters.
\n " + "documentation": "\nThe creation date and time end value filter for listing clusters\n .
\n " }, "ClusterStates": { "shape_name": "ClusterStateList", @@ -1590,7 +1610,7 @@ "Marker": { "shape_name": "Marker", "type": "string", - "documentation": "\nThe pagination token is a random string indicating whether there are more results to fetch. Provide the pagination token from earlier API calls to retrieve the next page of results. When the value is null, all results have been returned.\n
\n " + "documentation": "\nThe pagination token that indicates the next set of results to retrieve.\n
\n " } }, "documentation": "\nThis input determines how the ListClusters action filters the list of clusters that it returns.
\n " @@ -1693,7 +1713,7 @@ "Marker": { "shape_name": "Marker", "type": "string", - "documentation": "\nThe pagination token is a random string indicating whether there are more results to fetch. Use the pagination token in later API calls to retrieve the next page of results. When the value is null, all results have been returned. \n
\n " + "documentation": "\nThe pagination token that indicates the next set of results to retrieve.\n
\n " } }, "documentation": "\nThis contains a ClusterSummaryList with the cluster details; for example, the cluster IDs, names, and status.
\n " @@ -1706,10 +1726,10 @@ "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\nThe message associated with the exception.
\n \n " + "documentation": "\nThe message associated with the exception.
\n\n " } }, - "documentation": "\nThis exception occurs when there is an internal failure in the EMR service.
\n \n " + "documentation": "\nThis exception occurs when there is an internal failure in the EMR service.
\n\n " }, { "shape_name": "InvalidRequestException", @@ -1720,15 +1740,15 @@ "type": "string", "min_length": 1, "max_length": 256, - "documentation": "\nThe error code associated with the exception.
\n \n " + "documentation": "\nThe error code associated with the exception.
\n\n " }, "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\nThe message associated with the exception.
\n \n " + "documentation": "\nThe message associated with the exception.
\n\n " } }, - "documentation": "\nThis exception occurs when there is something wrong with user input.
\n \n " + "documentation": "\nThis exception occurs when there is something wrong with user input.
\n\n " } ], "documentation": "\nProvides the status of all clusters visible to this AWS account. Allows you to filter the list of clusters based on certain criteria; for example, filtering by cluster creation date and time or by status.\n This call returns a maximum of 50 clusters per call, but returns a marker to track the paging of the cluster list across multiple ListClusters calls. \n
\n \n ", @@ -1748,12 +1768,13 @@ "ClusterId": { "shape_name": "ClusterId", "type": "string", - "documentation": "\nThe identifier of the cluster for which to list the instance groups.
\n " + "documentation": "\nThe identifier of the cluster for which to list the instance groups.
\n ", + "required": true }, "Marker": { "shape_name": "Marker", "type": "string", - "documentation": "\nThe pagination token is a random string indicating whether there are more results to fetch. Provide the pagination token from earlier API calls to retrieve the next page of results. When the value is null, all results have been returned.
\n " + "documentation": "\nThe pagination token that indicates the next set of results to retrieve.
\n " } }, "documentation": "\nThis input determines which instance groups to retrieve.
\n " @@ -1840,7 +1861,7 @@ "SHUTTING_DOWN", "ENDED" ], - "documentation": "\nThe current state of the instance group. The following values are deprecated: ARRESTED, SHUTTING_DOWN, and ENDED. Use SUSPENDED, TERMINATING, and TERMINATED instead, respectively.\n
\n " + "documentation": "\nThe current state of the instance group.
\n " }, "StateChangeReason": { "shape_name": "InstanceGroupStateChangeReason", @@ -1898,7 +1919,7 @@ "Marker": { "shape_name": "Marker", "type": "string", - "documentation": "\nThe pagination token is a random string indicating whether there are more results to fetch. Use the pagination token in later API calls to retrieve the next page of results. When the value is null, all results have been returned.
\n " + "documentation": "\nThe pagination token that indicates the next set of results to retrieve.
\n " } }, "documentation": "\nThis input determines which instance groups to retrieve.
\n " @@ -1911,10 +1932,10 @@ "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\nThe message associated with the exception.
\n \n " + "documentation": "\nThe message associated with the exception.
\n\n " } }, - "documentation": "\nThis exception occurs when there is an internal failure in the EMR service.
\n \n " + "documentation": "\nThis exception occurs when there is an internal failure in the EMR service.
\n\n " }, { "shape_name": "InvalidRequestException", @@ -1925,15 +1946,15 @@ "type": "string", "min_length": 1, "max_length": 256, - "documentation": "\nThe error code associated with the exception.
\n \n " + "documentation": "\nThe error code associated with the exception.
\n\n " }, "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\nThe message associated with the exception.
\n \n " + "documentation": "\nThe message associated with the exception.
\n\n " } }, - "documentation": "\nThis exception occurs when there is something wrong with user input.
\n \n " + "documentation": "\nThis exception occurs when there is something wrong with user input.
\n\n " } ], "documentation": "\nProvides all available details about the instance groups in a cluster.
\n \n ", @@ -1953,7 +1974,8 @@ "ClusterId": { "shape_name": "ClusterId", "type": "string", - "documentation": "\nThe identifier of the cluster for which to list the instances.
\n " + "documentation": "\nThe identifier of the cluster for which to list the instances.
\n ", + "required": true }, "InstanceGroupId": { "shape_name": "InstanceGroupId", @@ -1978,7 +2000,7 @@ "Marker": { "shape_name": "Marker", "type": "string", - "documentation": "\nThe pagination token is a random string indicating whether there are more results to fetch. Provide the pagination token from earlier API calls to retrieve the next page of results. When the value is null, all results have been returned.
\n " + "documentation": "\nThe pagination token that indicates the next set of results to retrieve.
\n " } }, "documentation": "\nThis input determines which instances to list.
\n " @@ -2097,7 +2119,7 @@ "Marker": { "shape_name": "Marker", "type": "string", - "documentation": "\nThe pagination token is a random string indicating whether there are more results to fetch. Use the pagination token in later API calls to retrieve the next page of results. When the value is null, all results have been returned.
\n " + "documentation": "\nThe pagination token that indicates the next set of results to retrieve.
\n " } }, "documentation": "\nThis output contains the list of instances.
\n " @@ -2110,10 +2132,10 @@ "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\nThe message associated with the exception.
\n \n " + "documentation": "\nThe message associated with the exception.
\n\n " } }, - "documentation": "\nThis exception occurs when there is an internal failure in the EMR service.
\n \n " + "documentation": "\nThis exception occurs when there is an internal failure in the EMR service.
\n\n " }, { "shape_name": "InvalidRequestException", @@ -2124,15 +2146,15 @@ "type": "string", "min_length": 1, "max_length": 256, - "documentation": "\nThe error code associated with the exception.
\n \n " + "documentation": "\nThe error code associated with the exception.
\n\n " }, "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\nThe message associated with the exception.
\n \n " + "documentation": "\nThe message associated with the exception.
\n\n " } }, - "documentation": "\nThis exception occurs when there is something wrong with user input.
\n \n " + "documentation": "\nThis exception occurs when there is something wrong with user input.
\n\n " } ], "documentation": "\nProvides information about the cluster instances that Amazon EMR provisions on behalf of a user when it creates the cluster. \n For example, this operation indicates when the EC2 instances reach the Ready state, when instances become available to Amazon EMR to use for jobs, and the IP addresses for cluster instances, etc.\n
\n ", @@ -2152,7 +2174,8 @@ "ClusterId": { "shape_name": "ClusterId", "type": "string", - "documentation": "\nThe identifier of the cluster for which to list the steps.
\n " + "documentation": "\nThe identifier of the cluster for which to list the steps.
\n ", + "required": true }, "StepStates": { "shape_name": "StepStateList", @@ -2175,7 +2198,7 @@ "Marker": { "shape_name": "Marker", "type": "string", - "documentation": "\nThe pagination token is a random string indicating whether there are more results to fetch. Provide the pagination token from earlier API calls to retrieve the next page of results. When the value is null, all results have been returned.
\n " + "documentation": "\nThe pagination token that indicates the next set of results to retrieve.
\n " } }, "documentation": "\nThis input determines which steps to list.
\n " @@ -2250,12 +2273,12 @@ "StartDateTime": { "shape_name": "Date", "type": "timestamp", - "documentation": "\nThe date and time when the cluster step execution started.\n Due to delays in step status reporting, this can display a time which pre-dates a previous call to DescribeStep that indicated the step was not yet running. \n
\n " + "documentation": "\nThe date and time when the cluster step execution started.\n
\n " }, "EndDateTime": { "shape_name": "Date", "type": "timestamp", - "documentation": "\nThe date and time when the cluster step execution completed or failed. \n This can display a time that pre-dates a call to DescribeStep that indicates the step is running, due to delays in step status reporting. \n
\n " + "documentation": "\nThe date and time when the cluster step execution completed or failed.\n
\n " } }, "documentation": "\nThe timeline of the cluster step status over time.\n
\n " @@ -2271,7 +2294,7 @@ "Marker": { "shape_name": "Marker", "type": "string", - "documentation": "\nThe pagination token is a random string indicating whether there are more results to fetch. Use the pagination token in later API calls to retrieve the next page of results. When the value is null, all results have been returned.
\n " + "documentation": "\nThe pagination token that indicates the next set of results to retrieve.
\n " } }, "documentation": "\nThis output contains the list of steps.
\n " @@ -2284,10 +2307,10 @@ "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\nThe message associated with the exception.
\n \n " + "documentation": "\nThe message associated with the exception.
\n\n " } }, - "documentation": "\nThis exception occurs when there is an internal failure in the EMR service.
\n \n " + "documentation": "\nThis exception occurs when there is an internal failure in the EMR service.
\n\n " }, { "shape_name": "InvalidRequestException", @@ -2298,15 +2321,15 @@ "type": "string", "min_length": 1, "max_length": 256, - "documentation": "\nThe error code associated with the exception.
\n \n " + "documentation": "\nThe error code associated with the exception.
\n\n " }, "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\nThe message associated with the exception.
\n \n " + "documentation": "\nThe message associated with the exception.
\n\n " } }, - "documentation": "\nThis exception occurs when there is something wrong with user input.
\n \n " + "documentation": "\nThis exception occurs when there is something wrong with user input.
\n\n " } ], "documentation": "\nProvides a list of steps for the cluster. \n
\n ", @@ -2352,7 +2375,7 @@ "type": "string", "documentation": null }, - "documentation": "\nThe EC2 InstanceIds to terminate. For advanced users only. \n Once you terminate the instances, the instance group will not return to its original requested size.
\n " + "documentation": "\nThe EC2 InstanceIds to terminate. For advanced users only. \n Once you terminate the instances, the instance group will not return to its original requested size.
\n " } }, "documentation": "\nModify an instance group size.
\n " @@ -2382,7 +2405,8 @@ "ResourceId": { "shape_name": "ResourceId", "type": "string", - "documentation": "\nThe Amazon EMR resource identifier from which tags will be removed. This value must be a cluster identifier.
\n " + "documentation": "\nThe Amazon EMR resource identifier from which tags will be removed. This value must be a cluster identifier.
\n ", + "required": true }, "TagKeys": { "shape_name": "StringList", @@ -2392,16 +2416,17 @@ "type": "string", "documentation": null }, - "documentation": "\nA list of tag keys to remove from a resource.
\n " + "documentation": "\nA list of tag keys to remove from a resource.
\n ", + "required": true } }, - "documentation": "\nThis input identifies a cluster and a list of tags to remove. \n
\n " + "documentation": "\nThis input identifies a cluster and a list of tags to remove. \n
\n " }, "output": { "shape_name": "RemoveTagsOutput", "type": "structure", "members": {}, - "documentation": "\nThis output indicates the result of removing tags from a resource. \n
\n " + "documentation": "\nThis output indicates the result of removing tags from a resource. \n
\n " }, "errors": [ { @@ -2411,10 +2436,10 @@ "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\nThe message associated with the exception.
\n \n " + "documentation": "\nThe message associated with the exception.
\n\n " } }, - "documentation": "\nThis exception occurs when there is an internal failure in the EMR service.
\n \n " + "documentation": "\nThis exception occurs when there is an internal failure in the EMR service.
\n\n " }, { "shape_name": "InvalidRequestException", @@ -2425,18 +2450,18 @@ "type": "string", "min_length": 1, "max_length": 256, - "documentation": "\nThe error code associated with the exception.
\n \n " + "documentation": "\nThe error code associated with the exception.
\n\n " }, "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\nThe message associated with the exception.
\n \n " + "documentation": "\nThe message associated with the exception.
\n\n " } }, - "documentation": "\nThis exception occurs when there is something wrong with user input.
\n \n " + "documentation": "\nThis exception occurs when there is something wrong with user input.
\n\n " } ], - "documentation": "\nRemoves tags from an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. \n For more information, see Tagging Amazon EMR Resources. \n
\n \n " + "documentation": "\nRemoves tags from an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. \n For more information, see Tagging Amazon EMR Resources. \n
\nThe following example removes the stack tag with value Prod from a cluster:
\nThe following example removes the stack and hbase tags from a cluster:
\nAn IAM role for the job flow. The EC2 instances of the job flow assume this role. The default role is EMRJobflowDefault
. In order to use the default role, you must have already created it using the CLI.
IAM role that Amazon ElasticMapReduce will assume to work with AWS resources on your behalf. You may set this parameter to the name of an existing IAM role.
\n " + }, "Tags": { "shape_name": "TagList", "type": "list", @@ -2837,15 +2870,15 @@ "Key": { "shape_name": "String", "type": "string", - "documentation": "\nA user-defined key, which is the minimum required information for a valid tag.\n For more information, see Tagging Amazon EMR Resources. \n
\n " + "documentation": "\nA user-defined key, which is the minimum required information for a valid tag. \n For more information, see Tagging Amazon EMR Resources. \n
\n " }, "Value": { "shape_name": "String", "type": "string", - "documentation": "\nA user-defined value, which is optional in a tag.\n For more information, see Tagging Amazon EMR Resources. \n
\n " + "documentation": "\nA user-defined value, which is optional in a tag. \n For more information, see Tagging Amazon EMR Resources. \n
\n " } }, - "documentation": "\nA key/value pair that contains user-defined metadata that you can associate with an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. \n For more information, see Tagging Amazon EMR Resources. \n
\n " + "documentation": "\n<p>A key/value pair containing user-defined metadata that you can associate with an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. \n For more information, see <a href=\"http://docs.aws.amazon.com/ElasticMapReduce/latest/DeveloperGuide/emr-plan-tags.html\">Tagging Amazon EMR Resources</a>. \n </p>
\n " }, "documentation": "\nA list of tags to associate with a cluster and propagate to Amazon EC2 instances.
\n " } @@ -2992,7 +3025,6 @@ "documentation": "\n\n TerminateJobFlows shuts a list of job flows down. When a job flow is shut down, any step\n not yet completed is canceled and the EC2 instances on which the job flow is running are\n stopped. Any log files not already saved are uploaded to Amazon S3 if a LogUri was\n specified when the job flow was created. \n
\n\n The call to TerminateJobFlows is asynchronous. Depending on the configuration of the job flow, \n it may take up to 5-20 minutes for the job flow to \n completely terminate and release allocated resources, such as Amazon EC2 instances.\n
\n \nThe identifier of the job that you want to cancel.
\nTo get a list of the jobs (including their jobId
) that have a status of\n Submitted
, use the ListJobsByStatus API action.
The Id
of the pipeline that you want Elastic Transcoder to use for\n transcoding. The pipeline determines several settings, including the Amazon S3 bucket\n from which Elastic Transcoder gets the files to transcode and the bucket into which\n Elastic Transcoder puts the transcoded files.
The Id
of the pipeline that you want Elastic Transcoder to use for\n transcoding. The pipeline determines several settings, including the Amazon S3 bucket\n from which Elastic Transcoder gets the files to transcode and the bucket into which\n Elastic Transcoder puts the transcoded files.
The container type for the input file. If you want Elastic Transcoder to automatically detect the\n container type of the input file, specify auto
. If you want to specify the\n container type for the input file, enter one of the following values:
\n 3gp
, aac
, asf
, avi
, \n divx
, flv
, m4a
, mkv
, \n mov
, mp3
, mp4
, mpeg
, \n mpeg-ps
, mpeg-ts
, mxf
, ogg
, \n vob
, wav
, webm
\n
A section of the request body that provides information about the file that is being\n transcoded.
\n " + "documentation": "\nA section of the request body that provides information about the file that is being\n transcoded.
\n ", + "required": true }, "Output": { "shape_name": "CreateJobOutput", @@ -288,7 +291,7 @@ "shape_name": "Time", "type": "string", "pattern": "(^\\d{1,5}(\\.\\d{0,3})?$)|(^([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", - "documentation": "\nThe duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.
\nIf you specify a value longer than the duration of the input file , Elastic Transcoder transcodes \n the file and returns a warning message.
\n " + "documentation": "\nThe duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.
\nIf you specify a value longer than the duration of the input file, Elastic Transcoder transcodes \n the file and returns a warning message.
\n " } }, "documentation": "\nSettings that determine when a clip begins and how long it lasts.
\n " @@ -296,7 +299,85 @@ }, "documentation": "\nSettings for one clip in a composition. All jobs in a playlist must have the same clip settings.
\n " }, - "documentation": "\nYou can create an output file that contains an excerpt from the input file. This excerpt, called a clip, can come from the beginning, middle, or end of the file. The Composition object contains settings for the clips that make up an output file. For the current release, you can only specify settings for a single clip per output file. The Composition object cannot be null.
\n " + "documentation": "\nYou can create an output file that contains an excerpt from the input file. This \n excerpt, called a clip, can come from the beginning, middle, or end of the file. \n The Composition object contains settings for the clips that make up an output file. \n For the current release, you can only specify settings for a single clip per output \n file. The Composition object cannot be null.
\n " + }, + "Captions": { + "shape_name": "Captions", + "type": "structure", + "members": { + "MergePolicy": { + "shape_name": "CaptionMergePolicy", + "type": "string", + "pattern": "(^MergeOverride$)|(^MergeRetain$)|(^Override$)", + "documentation": "\nA policy that determines how Elastic Transcoder handles the existence of multiple captions.
\nMergeOverride: Elastic Transcoder transcodes both embedded\n and sidecar captions into outputs. If captions for a language are embedded in the input\n file and also appear in a sidecar file, Elastic Transcoder uses the sidecar captions and ignores the\n embedded captions for that language.
MergeRetain: Elastic Transcoder transcodes both embedded\n and sidecar captions into outputs. If captions for a language are embedded in the input\n file and also appear in a sidecar file, Elastic Transcoder uses the embedded captions and ignores the\n sidecar captions for that language. If
Override: Elastic Transcoder transcodes only the\n sidecar captions that you specify in CaptionSources
.
MergePolicy
cannot be null.
The name of the sidecar caption file that you want Elastic Transcoder to include in the\n output file.
\n " + }, + "Language": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\nA string that specifies the language of the caption. Specify this as one of:
\n2-character ISO 639-1 code
3-character ISO 639-2 code
For more information on ISO language codes and language names, see the List of ISO \n 639-1 codes.
\n " + }, + "TimeOffset": { + "shape_name": "TimeOffset", + "type": "string", + "pattern": "(^[+-]?\\d{1,5}(\\.\\d{0,3})?$)|(^[+-]?([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", + "documentation": "\nFor clip generation or captions that do not start at the same time\n as the associated video file, the TimeOffset
tells Elastic Transcoder how much of the video to encode\n before including captions.
Specify the TimeOffset in the form [+-]SS.sss or [+-]HH:mm:SS.ss.
\n " + }, + "Label": { + "shape_name": "Name", + "type": "string", + "min_length": 1, + "max_length": 40, + "documentation": "\nThe label of the caption shown in the player when choosing\n a language. We recommend that you put the caption language name here, \n in the language of the captions.
\n " + } + }, + "documentation": "\nA source file for the input sidecar captions used during the transcoding\n process.
\n " + }, + "max_length": 20, + "documentation": "\nSource files for the input sidecar captions used during the transcoding\n process. To omit all sidecar captions, leave CaptionSources
blank.
The format you specify determines whether Elastic Transcoder generates an\n embedded or sidecar caption for this output.
\nValid Embedded Caption Formats:
\nFor MP3: None
For MP4: mov-text
For MPEG-TS: None
For ogg: None
For webm: None
Valid Sidecar Caption Formats: Elastic Transcoder\n supports dfxp (first div element only), scc, srt, and webvtt. There are\n no container restrictions on sidecar captions. If you want ttml or smpte-tt\n compatible captions, specify dfxp as your output format.
\nThe prefix for caption filenames, in the form description-{language}
, where:
{language}
is a literal value that Elastic Transcoder replaces with the two- or three-letter\n code for the language of the caption in the output file names.If you don't include {language}
in the file name pattern, Elastic Transcoder automatically\n appends \"{language}
\" to the value that you specify for the description. In addition,\n Elastic Transcoder automatically appends the count to the end of the segment files.
For example, suppose you're transcoding into srt format. When you enter\n \"Sydney-{language}-sunrise\", and the language of the captions is English\n (en), the name of the first caption file will be Sydney-en-sunrise00000.srt.
\n " + } + }, + "documentation": "\nThe file format of the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.
\n " + }, + "max_length": 4, + "documentation": "\nThe array of file formats for the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.
\n " + } + }, + "documentation": "\nYou can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another.\n All captions must be in UTF-8. Elastic Transcoder supports two types of captions:
\nEmbedded: Embedded captions\n are included in the same file as the audio and video. Elastic Transcoder supports\n only one embedded caption per language, to a maximum of 300 embedded captions per file.
\nValid input values include: CEA-608 (EIA-608
, first non-empty channel only),\n \tCEA-708 (EIA-708
, first non-empty channel only), and mov-text
Valid outputs include: mov-text
Elastic Transcoder supports a maximum of one embedded format per output.
\nSidecar: Sidecar captions are kept in a\n separate metadata file from the audio and video data. Sidecar captions require a player\n that is capable of understanding the relationship between the video file and the sidecar\n file. Elastic Transcoder supports only one sidecar caption per language, to a maximum of 20 sidecar\n captions per file.
\nValid input values include: dfxp
(first div element only),\n ebu-tt
, scc
, smpt
, srt
,\n ttml
(first div element only), and webvtt
Valid outputs include: dfxp
(first div element only), scc
,\n srt
, and webvtt
.
If you want ttml or smpte-tt compatible captions, specify dfxp as your output format.
\nElastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures\n as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does\n not preserve text formatting (for example, italics) during the transcoding process.
\nTo remove captions or leave the captions empty, set Captions
to null. To pass through\n existing captions unchanged, set the MergePolicy
to MergeRetain
,\n and pass in a null CaptionSources
array.
For more information on embedded files, see the Subtitles Wikipedia page.
\nFor more information on sidecar files, see the Extensible Metadata Platform and Sidecar file\n Wikipedia pages.
\n " } }, "documentation": "\nThe CreateJobOutput
structure.
The duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.
\nIf you specify a value longer than the duration of the input file , Elastic Transcoder transcodes \n the file and returns a warning message.
\n " + "documentation": "\nThe duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.
\nIf you specify a value longer than the duration of the input file, Elastic Transcoder transcodes \n the file and returns a warning message.
\n " } }, "documentation": "\nSettings that determine when a clip begins and how long it lasts.
\n " @@ -458,7 +539,85 @@ }, "documentation": "\nSettings for one clip in a composition. All jobs in a playlist must have the same clip settings.
\n " }, - "documentation": "\nYou can create an output file that contains an excerpt from the input file. This excerpt, called a clip, can come from the beginning, middle, or end of the file. The Composition object contains settings for the clips that make up an output file. For the current release, you can only specify settings for a single clip per output file. The Composition object cannot be null.
\n " + "documentation": "\nYou can create an output file that contains an excerpt from the input file. This \n excerpt, called a clip, can come from the beginning, middle, or end of the file. \n The Composition object contains settings for the clips that make up an output file. \n For the current release, you can only specify settings for a single clip per output \n file. The Composition object cannot be null.
\n " + }, + "Captions": { + "shape_name": "Captions", + "type": "structure", + "members": { + "MergePolicy": { + "shape_name": "CaptionMergePolicy", + "type": "string", + "pattern": "(^MergeOverride$)|(^MergeRetain$)|(^Override$)", + "documentation": "\nA policy that determines how Elastic Transcoder handles the existence of multiple captions.
\nMergeOverride: Elastic Transcoder transcodes both embedded\n and sidecar captions into outputs. If captions for a language are embedded in the input\n file and also appear in a sidecar file, Elastic Transcoder uses the sidecar captions and ignores the\n embedded captions for that language.
MergeRetain: Elastic Transcoder transcodes both embedded\n and sidecar captions into outputs. If captions for a language are embedded in the input\n file and also appear in a sidecar file, Elastic Transcoder uses the embedded captions and ignores the\n sidecar captions for that language. If
Override: Elastic Transcoder transcodes only the\n sidecar captions that you specify in CaptionSources
.
MergePolicy
cannot be null.
The name of the sidecar caption file that you want Elastic Transcoder to include in the\n output file.
\n " + }, + "Language": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\nA string that specifies the language of the caption. Specify this as one of:
\n2-character ISO 639-1 code
3-character ISO 639-2 code
For more information on ISO language codes and language names, see the List of ISO \n 639-1 codes.
\n " + }, + "TimeOffset": { + "shape_name": "TimeOffset", + "type": "string", + "pattern": "(^[+-]?\\d{1,5}(\\.\\d{0,3})?$)|(^[+-]?([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", + "documentation": "\nFor clip generation or captions that do not start at the same time\n as the associated video file, the TimeOffset
tells Elastic Transcoder how much of the video to encode\n before including captions.
Specify the TimeOffset in the form [+-]SS.sss or [+-]HH:mm:SS.ss.
\n " + }, + "Label": { + "shape_name": "Name", + "type": "string", + "min_length": 1, + "max_length": 40, + "documentation": "\nThe label of the caption shown in the player when choosing\n a language. We recommend that you put the caption language name here, \n in the language of the captions.
\n " + } + }, + "documentation": "\nA source file for the input sidecar captions used during the transcoding\n process.
\n " + }, + "max_length": 20, + "documentation": "\nSource files for the input sidecar captions used during the transcoding\n process. To omit all sidecar captions, leave CaptionSources
blank.
The format you specify determines whether Elastic Transcoder generates an\n embedded or sidecar caption for this output.
\nValid Embedded Caption Formats:
\nFor MP3: None
For MP4: mov-text
For MPEG-TS: None
For ogg: None
For webm: None
Valid Sidecar Caption Formats: Elastic Transcoder\n supports dfxp (first div element only), scc, srt, and webvtt. There are\n no container restrictions on sidecar captions. If you want ttml or smpte-tt\n compatible captions, specify dfxp as your output format.
\nThe prefix for caption filenames, in the form description-{language}
, where:
{language}
is a literal value that Elastic Transcoder replaces with the two- or three-letter\n code for the language of the caption in the output file names.If you don't include {language}
in the file name pattern, Elastic Transcoder automatically\n appends \"{language}
\" to the value that you specify for the description. In addition,\n Elastic Transcoder automatically appends the count to the end of the segment files.
For example, suppose you're transcoding into srt format. When you enter\n \"Sydney-{language}-sunrise\", and the language of the captions is English\n (en), the name of the first caption file will be Sydney-en-sunrise00000.srt.
\n " + } + }, + "documentation": "\nThe file format of the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.
\n " + }, + "max_length": 4, + "documentation": "\nThe array of file formats for the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.
\n " + } + }, + "documentation": "\nYou can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another.\n All captions must be in UTF-8. Elastic Transcoder supports two types of captions:
\nEmbedded: Embedded captions\n are included in the same file as the audio and video. Elastic Transcoder supports\n only one embedded caption per language, to a maximum of 300 embedded captions per file.
\nValid input values include: CEA-608 (EIA-608
, first non-empty channel only),\n \tCEA-708 (EIA-708
, first non-empty channel only), and mov-text
Valid outputs include: mov-text
Elastic Transcoder supports a maximum of one embedded format per output.
\nSidecar: Sidecar captions are kept in a\n separate metadata file from the audio and video data. Sidecar captions require a player\n that is capable of understanding the relationship between the video file and the sidecar\n file. Elastic Transcoder supports only one sidecar caption per language, to a maximum of 20 sidecar\n captions per file.
\nValid input values include: dfxp
(first div element only),\n ebu-tt
, scc
, smpt
, srt
,\n ttml
(first div element only), and webvtt
Valid outputs include: dfxp
(first div element only), scc
,\n srt
, and webvtt
.
If you want ttml or smpte-tt compatible captions, specify dfxp as your output format.
\nElastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures\n as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does\n not preserve text formatting (for example, italics) during the transcoding process.
\nTo remove captions or leave the captions empty, set Captions
to null. To pass through\n existing captions unchanged, set the MergePolicy
to MergeRetain
,\n and pass in a null CaptionSources
array.
For more information on embedded files, see the Subtitles Wikipedia page.
\nFor more information on sidecar files, see the Extensible Metadata Platform and Sidecar file\n Wikipedia pages.
\n " } }, "documentation": "\nThe CreateJobOutput
structure.
The duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.
\nIf you specify a value longer than the duration of the input file , Elastic Transcoder transcodes \n the file and returns a warning message.
\n " + "documentation": "\nThe duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.
\nIf you specify a value longer than the duration of the input file, Elastic Transcoder transcodes \n the file and returns a warning message.
\n " } }, "documentation": "\nSettings that determine when a clip begins and how long it lasts.
\n " @@ -771,7 +930,85 @@ }, "documentation": "\nSettings for one clip in a composition. All jobs in a playlist must have the same clip settings.
\n " }, - "documentation": "\nYou can create an output file that contains an excerpt from the input file. This excerpt, called a clip, can come from the beginning, middle, or end of the file. The Composition object contains settings for the clips that make up an output file. For the current release, you can only specify settings for a single clip per output file. The Composition object cannot be null.
\n " + "documentation": "\nYou can create an output file that contains an excerpt from the input file. This \n excerpt, called a clip, can come from the beginning, middle, or end of the file. \n The Composition object contains settings for the clips that make up an output \n file. For the current release, you can only specify settings for a single clip \n per output file. The Composition object cannot be null.
\n " + }, + "Captions": { + "shape_name": "Captions", + "type": "structure", + "members": { + "MergePolicy": { + "shape_name": "CaptionMergePolicy", + "type": "string", + "pattern": "(^MergeOverride$)|(^MergeRetain$)|(^Override$)", + "documentation": "\nA policy that determines how Elastic Transcoder handles the existence of multiple captions.
\nMergeOverride: Elastic Transcoder transcodes both embedded\n and sidecar captions into outputs. If captions for a language are embedded in the input\n file and also appear in a sidecar file, Elastic Transcoder uses the sidecar captions and ignores the\n embedded captions for that language.
MergeRetain: Elastic Transcoder transcodes both embedded\n and sidecar captions into outputs. If captions for a language are embedded in the input\n file and also appear in a sidecar file, Elastic Transcoder uses the embedded captions and ignores the\n sidecar captions for that language. If
Override: Elastic Transcoder transcodes only the\n sidecar captions that you specify in CaptionSources
.
MergePolicy
cannot be null.
The name of the sidecar caption file that you want Elastic Transcoder to include in the\n output file.
\n " + }, + "Language": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\nA string that specifies the language of the caption. Specify this as one of:
\n2-character ISO 639-1 code
3-character ISO 639-2 code
For more information on ISO language codes and language names, see the List of ISO \n 639-1 codes.
\n " + }, + "TimeOffset": { + "shape_name": "TimeOffset", + "type": "string", + "pattern": "(^[+-]?\\d{1,5}(\\.\\d{0,3})?$)|(^[+-]?([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", + "documentation": "\nFor clip generation or captions that do not start at the same time\n as the associated video file, the TimeOffset
tells Elastic Transcoder how much of the video to encode\n before including captions.
Specify the TimeOffset in the form [+-]SS.sss or [+-]HH:mm:SS.ss.
\n " + }, + "Label": { + "shape_name": "Name", + "type": "string", + "min_length": 1, + "max_length": 40, + "documentation": "\nThe label of the caption shown in the player when choosing\n a language. We recommend that you put the caption language name here, \n in the language of the captions.
\n " + } + }, + "documentation": "\nA source file for the input sidecar captions used during the transcoding\n process.
\n " + }, + "max_length": 20, + "documentation": "\nSource files for the input sidecar captions used during the transcoding\n process. To omit all sidecar captions, leave CaptionSources
blank.
The format you specify determines whether Elastic Transcoder generates an\n embedded or sidecar caption for this output.
\nValid Embedded Caption Formats:
\nFor MP3: None
For MP4: mov-text
For MPEG-TS: None
For ogg: None
For webm: None
Valid Sidecar Caption Formats: Elastic Transcoder\n supports dfxp (first div element only), scc, srt, and webvtt. There are\n no container restrictions on sidecar captions. If you want ttml or smpte-tt\n compatible captions, specify dfxp as your output format.
\nThe prefix for caption filenames, in the form description-{language}
, where:
{language}
is a literal value that Elastic Transcoder replaces with the two- or three-letter\n code for the language of the caption in the output file names.If you don't include {language}
in the file name pattern, Elastic Transcoder automatically\n appends \"{language}
\" to the value that you specify for the description. In addition,\n Elastic Transcoder automatically appends the count to the end of the segment files.
For example, suppose you're transcoding into srt format. When you enter\n \"Sydney-{language}-sunrise\", and the language of the captions is English\n (en), the name of the first caption file will be Sydney-en-sunrise00000.srt.
\n " + } + }, + "documentation": "\nThe file format of the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.
\n " + }, + "max_length": 4, + "documentation": "\nThe array of file formats for the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.
\n " + } + }, + "documentation": "\nYou can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another.\n All captions must be in UTF-8. Elastic Transcoder supports two types of captions:
\nEmbedded: Embedded captions\n are included in the same file as the audio and video. Elastic Transcoder supports\n only one embedded caption per language, to a maximum of 300 embedded captions per file.
\nValid input values include: CEA-608 (EIA-608
, first non-empty channel only),\n CEA-708 (EIA-708
, first non-empty channel only), and mov-text
Valid outputs include: mov-text
Elastic Transcoder supports a maximum of one embedded format per output.
\nSidecar: Sidecar captions are kept in a\n separate metadata file from the audio and video data. Sidecar captions require a player\n that is capable of understanding the relationship between the video file and the sidecar\n file. Elastic Transcoder supports only one sidecar caption per language, to a maximum of 20 sidecar\n captions per file.
\nValid input values include: dfxp
(first div element only),\n ebu-tt
, scc
, smpt
, srt
,\n ttml
(first div element only), and webvtt
Valid outputs include: dfxp
(first div element only), scc
,\n srt
, and webvtt
.
If you want ttml or smpte-tt compatible captions, specify dfxp as your output format.
\nElastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures\n as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does\n not preserve text formatting (for example, italics) during the transcoding process.
\nTo remove captions or leave the captions empty, set Captions
to null. To pass through\n existing captions unchanged, set the MergePolicy
to MergeRetain
,\n and pass in a null CaptionSources
array.
For more information on embedded files, see the Subtitles Wikipedia page.
\nFor more information on sidecar files, see the Extensible Metadata Platform and Sidecar file\n Wikipedia pages.
\n " } }, "documentation": "\nIf you specified one output for a job, information about that output. If you specified\n multiple outputs for a job, the Output object lists information about the first output.\n This duplicates the information that is listed for the first output in the Outputs\n object.
\nThe duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.
\nIf you specify a value longer than the duration of the input file , Elastic Transcoder transcodes \n the file and returns a warning message.
\n " + "documentation": "\nThe duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.
\nIf you specify a value longer than the duration of the input file, Elastic Transcoder transcodes \n the file and returns a warning message.
\n " } }, "documentation": "\nSettings that determine when a clip begins and how long it lasts.
\n " @@ -966,7 +1203,85 @@ }, "documentation": "\nSettings for one clip in a composition. All jobs in a playlist must have the same clip settings.
\n " }, - "documentation": "\nYou can create an output file that contains an excerpt from the input file. This excerpt, called a clip, can come from the beginning, middle, or end of the file. The Composition object contains settings for the clips that make up an output file. For the current release, you can only specify settings for a single clip per output file. The Composition object cannot be null.
\n " + "documentation": "\nYou can create an output file that contains an excerpt from the input file. This \n excerpt, called a clip, can come from the beginning, middle, or end of the file. \n The Composition object contains settings for the clips that make up an output \n file. For the current release, you can only specify settings for a single clip \n per output file. The Composition object cannot be null.
\n " + }, + "Captions": { + "shape_name": "Captions", + "type": "structure", + "members": { + "MergePolicy": { + "shape_name": "CaptionMergePolicy", + "type": "string", + "pattern": "(^MergeOverride$)|(^MergeRetain$)|(^Override$)", + "documentation": "\nA policy that determines how Elastic Transcoder handles the existence of multiple captions.
\nMergeOverride: Elastic Transcoder transcodes both embedded\n and sidecar captions into outputs. If captions for a language are embedded in the input\n file and also appear in a sidecar file, Elastic Transcoder uses the sidecar captions and ignores the\n embedded captions for that language.
MergeRetain: Elastic Transcoder transcodes both embedded\n and sidecar captions into outputs. If captions for a language are embedded in the input\n file and also appear in a sidecar file, Elastic Transcoder uses the embedded captions and ignores the\n sidecar captions for that language. If
Override: Elastic Transcoder transcodes only the\n sidecar captions that you specify in CaptionSources
.
MergePolicy
cannot be null.
The name of the sidecar caption file that you want Elastic Transcoder to include in the\n output file.
\n " + }, + "Language": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\nA string that specifies the language of the caption. Specify this as one of:
\n2-character ISO 639-1 code
3-character ISO 639-2 code
For more information on ISO language codes and language names, see the List of ISO \n 639-1 codes.
\n " + }, + "TimeOffset": { + "shape_name": "TimeOffset", + "type": "string", + "pattern": "(^[+-]?\\d{1,5}(\\.\\d{0,3})?$)|(^[+-]?([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", + "documentation": "\nFor clip generation or captions that do not start at the same time\n as the associated video file, the TimeOffset
tells Elastic Transcoder how much of the video to encode\n before including captions.
Specify the TimeOffset in the form [+-]SS.sss or [+-]HH:mm:SS.ss.
\n " + }, + "Label": { + "shape_name": "Name", + "type": "string", + "min_length": 1, + "max_length": 40, + "documentation": "\nThe label of the caption shown in the player when choosing\n a language. We recommend that you put the caption language name here, \n in the language of the captions.
\n " + } + }, + "documentation": "\nA source file for the input sidecar captions used during the transcoding\n process.
\n " + }, + "max_length": 20, + "documentation": "\nSource files for the input sidecar captions used during the transcoding\n process. To omit all sidecar captions, leave CaptionSources
blank.
The format you specify determines whether Elastic Transcoder generates an\n embedded or sidecar caption for this output.
\nValid Embedded Caption Formats:
\nFor MP3: None
For MP4: mov-text
For MPEG-TS: None
For ogg: None
For webm: None
Valid Sidecar Caption Formats: Elastic Transcoder\n supports dfxp (first div element only), scc, srt, and webvtt. There are\n no container restrictions on sidecar captions. If you want ttml or smpte-tt\n compatible captions, specify dfxp as your output format.
\nThe prefix for caption filenames, in the form description-{language}
, where:
{language}
is a literal value that Elastic Transcoder replaces with the two- or three-letter\n code for the language of the caption in the output file names.If you don't include {language}
in the file name pattern, Elastic Transcoder automatically\n appends \"{language}
\" to the value that you specify for the description. In addition,\n Elastic Transcoder automatically appends the count to the end of the segment files.
For example, suppose you're transcoding into srt format. When you enter\n \"Sydney-{language}-sunrise\", and the language of the captions is English\n (en), the name of the first caption file will be Sydney-en-sunrise00000.srt.
\n " + } + }, + "documentation": "\nThe file format of the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.
\n " + }, + "max_length": 4, + "documentation": "\nThe array of file formats for the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.
\n " + } + }, + "documentation": "\nYou can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another.\n All captions must be in UTF-8. Elastic Transcoder supports two types of captions:
\nEmbedded: Embedded captions\n are included in the same file as the audio and video. Elastic Transcoder supports\n only one embedded caption per language, to a maximum of 300 embedded captions per file.
\nValid input values include: CEA-608 (EIA-608
, first non-empty channel only),\n CEA-708 (EIA-708
, first non-empty channel only), and mov-text
Valid outputs include: mov-text
Elastic Transcoder supports a maximum of one embedded format per output.
\nSidecar: Sidecar captions are kept in a\n separate metadata file from the audio and video data. Sidecar captions require a player\n that is capable of understanding the relationship between the video file and the sidecar\n file. Elastic Transcoder supports only one sidecar caption per language, to a maximum of 20 sidecar\n captions per file.
\nValid input values include: dfxp
(first div element only),\n ebu-tt
, scc
, smpt
, srt
,\n ttml
(first div element only), and webvtt
Valid outputs include: dfxp
(first div element only), scc
,\n srt
, and webvtt
.
If you want ttml or smpte-tt compatible captions, specify dfxp as your output format.
\nElastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures\n as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does\n not preserve text formatting (for example, italics) during the transcoding process.
\nTo remove captions or leave the captions empty, set Captions
to null. To pass through\n existing captions unchanged, set the MergePolicy
to MergeRetain
,\n and pass in a null CaptionSources
array.
For more information on embedded files, see the Subtitles Wikipedia page.
\nFor more information on sidecar files, see the Extensible Metadata Platform and Sidecar file\n Wikipedia pages.
\n " } }, "documentation": "\nOutput
object lists information about the first output. This duplicates\n the information that is listed for the first output in the Outputs
\n object.
The name of the pipeline. We recommend that the name be unique within the AWS account,\n but uniqueness is not enforced.
\nConstraints: Maximum 40 characters.
\n " + "documentation": "\nThe name of the pipeline. We recommend that the name be unique within the AWS account,\n but uniqueness is not enforced.
\nConstraints: Maximum 40 characters.
\n ", + "required": true }, "InputBucket": { "shape_name": "BucketName", "type": "string", "pattern": "^(\\w|\\.|-){1,255}$", - "documentation": "\nThe Amazon S3 bucket in which you saved the media files that you want to transcode.
\n " + "documentation": "\nThe Amazon S3 bucket in which you saved the media files that you want to transcode.
\n ", + "required": true }, "OutputBucket": { "shape_name": "BucketName", @@ -1123,7 +1440,8 @@ "shape_name": "Role", "type": "string", "pattern": "^arn:aws:iam::\\w{12}:role/.+$", - "documentation": "\nThe IAM Amazon Resource Name (ARN) for the role that you want Elastic Transcoder to use to create the\n pipeline.
\n " + "documentation": "\nThe IAM Amazon Resource Name (ARN) for the role that you want Elastic Transcoder to use to create the\n pipeline.
\n ", + "required": true }, "Notifications": { "shape_name": "Notifications", @@ -1536,7 +1854,8 @@ "type": "string", "min_length": 1, "max_length": 40, - "documentation": "\nThe name of the preset. We recommend that the name be unique within the AWS account, but\n uniqueness is not enforced.
\n " + "documentation": "\nThe name of the preset. We recommend that the name be unique within the AWS account, but\n uniqueness is not enforced.
\n ", + "required": true }, "Description": { "shape_name": "Description", @@ -1549,7 +1868,8 @@ "shape_name": "PresetContainer", "type": "string", "pattern": "(^mp4$)|(^ts$)|(^webm$)|(^mp3$)|(^ogg$)", - "documentation": "\nThe container type for the output file. Valid values include mp3
, \n mp4
, ogg
, ts
, and webm
.
The container type for the output file. Valid values include mp3
, \n mp4
, ogg
, ts
, and webm
.
The maximum width of the watermark in one of the following formats:
MaxWidth
.Target
to specify whether you want Elastic Transcoder to include the black\n bars that are added by Elastic Transcoder, if any, in the calculation.MaxWidth
.The maximum height of the watermark in one of the following formats:
MaxHeight
.Target
to specify whether you want Elastic Transcoder to include the black\n bars that are added by Elastic Transcoder, if any, in the calculation.MaxHeight
.\n "
},
"SizingPolicy": {
@@ -1694,7 +2014,7 @@
"HorizontalOffset": {
"shape_name": "PixelsOrPercent",
"type": "string",
- "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{2,4}?px$)",
+ "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{1,4}?px$)",
"documentation": "\n The amount by which you want the horizontal position of the watermark to be offset from\n the position specified by HorizontalAlign:
HorizontalAlign
and 5px for\n HorizontalOffset
, the left side of the watermark appears 5 pixels from\n the left border of the output video.\n HorizontalOffset
is only valid when the value of\n HorizontalAlign
is Left
or Right
. If you\n specify an offset that causes the watermark to extend beyond the left or right border\n and Elastic Transcoder has not added black bars, the watermark is cropped. If Elastic\n Transcoder has added black bars, the watermark extends into the black bars. If the\n watermark extends beyond the black bars, it is cropped.
Use the value of Target
to specify whether you want to include the black\n bars that are added by Elastic Transcoder, if any, in the offset calculation.
VerticalOffset
\n The amount by which you want the vertical position of the watermark to be offset from the\n position specified by VerticalAlign:
MaxHeight
.Top
for VerticalAlign
and\n 5px
for VerticalOffset
, the top of the watermark appears 5\n pixels from the top border of the output video.\n VerticalOffset
is only valid when the value of VerticalAlign is Top or\n Bottom.
If you specify an offset that causes the watermark to extend beyond the top or bottom\n border and Elastic Transcoder has not added black bars, the watermark is cropped. If\n Elastic Transcoder has added black bars, the watermark extends into the black bars. If\n the watermark extends beyond the black bars, it is cropped.
\n\nUse the value of Target
to specify whether you want Elastic Transcoder to\n include the black bars that are added by Elastic Transcoder, if any, in the offset\n calculation.
If you specified AAC
for Audio:Codec
, choose the AAC profile for the output file.\n Elastic Transcoder supports the following profiles:
auto
: If you specify auto
, Elastic Transcoder will select\n the profile based on the bit rate selected for the output file.AAC-LC
: The most common AAC profile. Use for bitrates larger than\n 64 kbps.HE-AAC
: Not supported on some older players and devices.\n Use for bitrates between 40 and 80 kbps.HE-AACv2
: Not supported on some players and devices.\n Use for bitrates less than 48 kbps.If you created any presets before AAC profiles were added, Elastic Transcoder automatically updated\n your presets to use AAC-LC. You can change the value as required.
You can only choose an audio profile when you specify AAC for the value of Audio:Codec.
\nSpecify the AAC profile for the output file. Elastic Transcoder supports the following profiles:
\nauto
: If you specify auto
, Elastic Transcoder will select\n the profile based on the bit rate selected for the output file.AAC-LC
: The most common AAC profile. Use for bitrates larger than\n 64 kbps.HE-AAC
: Not supported on some older players and devices.\n Use for bitrates between 40 and 80 kbps.HE-AACv2
: Not supported on some players and devices.\n Use for bitrates less than 48 kbps.If you created any presets before AAC profiles were added, Elastic Transcoder automatically updated\n your presets to use AAC-LC. You can change the value as required.
If you specified AAC
for Audio:Codec
, this is the AAC
\n compression profile to use. Valid values include:
auto
, AAC-LC
, HE-AAC
, HE-AACv2
If you specify auto
, Elastic Transcoder chooses a profile based on the bit rate of the output file.
If you specified AAC
for Audio:Codec
, choose the AAC profile for the output file.\n Elastic Transcoder supports the following profiles:
auto
: If you specify auto
, Elastic Transcoder will select\n the profile based on the bit rate selected for the output file.AAC-LC
: The most common AAC profile. Use for bitrates larger than\n 64 kbps.HE-AAC
: Not supported on some older players and devices.\n Use for bitrates between 40 and 80 kbps.HE-AACv2
: Not supported on some players and devices.\n Use for bitrates less than 48 kbps.If you created any presets before AAC profiles were added, Elastic Transcoder automatically updated\n your presets to use AAC-LC. You can change the value as required.
You can only choose an audio profile when you specify AAC for the value of Audio:Codec.
\nSpecify the AAC profile for the output file. Elastic Transcoder supports the following profiles:
\nauto
: If you specify auto
, Elastic Transcoder will select\n the profile based on the bit rate selected for the output file.AAC-LC
: The most common AAC profile. Use for bitrates larger than\n 64 kbps.HE-AAC
: Not supported on some older players and devices.\n Use for bitrates between 40 and 80 kbps.HE-AACv2
: Not supported on some players and devices.\n Use for bitrates less than 48 kbps.If you created any presets before AAC profiles were added, Elastic Transcoder automatically updated\n your presets to use AAC-LC. You can change the value as required.
If you specified AAC
for Audio:Codec
, this is the AAC
\n compression profile to use. Valid values include:
auto
, AAC-LC
, HE-AAC
, HE-AACv2
If you specify auto
, Elastic Transcoder chooses a profile based on the bit rate of the output file.
The maximum width of the watermark in one of the following formats:
MaxWidth
.Target
to specify whether you want Elastic Transcoder to include the black\n bars that are added by Elastic Transcoder, if any, in the calculation.MaxWidth
.The maximum height of the watermark in one of the following formats:
MaxHeight
.Target
to specify whether you want Elastic Transcoder to include the black\n bars that are added by Elastic Transcoder, if any, in the calculation.MaxHeight
.\n "
},
"SizingPolicy": {
@@ -2057,7 +2377,7 @@
"HorizontalOffset": {
"shape_name": "PixelsOrPercent",
"type": "string",
- "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{2,4}?px$)",
+ "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{1,4}?px$)",
"documentation": "\n The amount by which you want the horizontal position of the watermark to be offset from\n the position specified by HorizontalAlign:
HorizontalAlign
and 5px for\n HorizontalOffset
, the left side of the watermark appears 5 pixels from\n the left border of the output video.\n HorizontalOffset
is only valid when the value of\n HorizontalAlign
is Left
or Right
. If you\n specify an offset that causes the watermark to extend beyond the left or right border\n and Elastic Transcoder has not added black bars, the watermark is cropped. If Elastic\n Transcoder has added black bars, the watermark extends into the black bars. If the\n watermark extends beyond the black bars, it is cropped.
Use the value of Target
to specify whether you want to include the black\n bars that are added by Elastic Transcoder, if any, in the offset calculation.
VerticalOffset
\n The amount by which you want the vertical position of the watermark to be offset from the\n position specified by VerticalAlign:
MaxHeight
.Top
for VerticalAlign
and\n 5px
for VerticalOffset
, the top of the watermark appears 5\n pixels from the top border of the output video.\n VerticalOffset
is only valid when the value of VerticalAlign is Top or\n Bottom.
If you specify an offset that causes the watermark to extend beyond the top or bottom\n border and Elastic Transcoder has not added black bars, the watermark is cropped. If\n Elastic Transcoder has added black bars, the watermark extends into the black bars. If\n the watermark extends beyond the black bars, it is cropped.
\n\nUse the value of Target
to specify whether you want Elastic Transcoder to\n include the black bars that are added by Elastic Transcoder, if any, in the offset\n calculation.
The identifier of the pipeline that you want to delete.
\n ", + "required": true, "location": "uri" } }, @@ -2293,6 +2614,7 @@ "type": "string", "pattern": "^\\d{13}-\\w{6}$", "documentation": "\nThe identifier of the preset for which you want to get detailed information.
\n ", + "required": true, "location": "uri" } }, @@ -2359,6 +2681,7 @@ "type": "string", "pattern": "^\\d{13}-\\w{6}$", "documentation": "\nThe ID of the pipeline for which you want to get job information.
\n ", + "required": true, "location": "uri" }, "Ascending": { @@ -2629,7 +2952,7 @@ "shape_name": "Time", "type": "string", "pattern": "(^\\d{1,5}(\\.\\d{0,3})?$)|(^([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", - "documentation": "\nThe duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.
\nIf you specify a value longer than the duration of the input file , Elastic Transcoder transcodes \n the file and returns a warning message.
\n " + "documentation": "\nThe duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.
\nIf you specify a value longer than the duration of the input file, Elastic Transcoder transcodes \n the file and returns a warning message.
\n " } }, "documentation": "\nSettings that determine when a clip begins and how long it lasts.
\n " @@ -2637,7 +2960,85 @@ }, "documentation": "\nSettings for one clip in a composition. All jobs in a playlist must have the same clip settings.
\n " }, - "documentation": "\nYou can create an output file that contains an excerpt from the input file. This excerpt, called a clip, can come from the beginning, middle, or end of the file. The Composition object contains settings for the clips that make up an output file. For the current release, you can only specify settings for a single clip per output file. The Composition object cannot be null.
\n " + "documentation": "\nYou can create an output file that contains an excerpt from the input file. This \n excerpt, called a clip, can come from the beginning, middle, or end of the file. \n The Composition object contains settings for the clips that make up an output \n file. For the current release, you can only specify settings for a single clip \n per output file. The Composition object cannot be null.
\n " + }, + "Captions": { + "shape_name": "Captions", + "type": "structure", + "members": { + "MergePolicy": { + "shape_name": "CaptionMergePolicy", + "type": "string", + "pattern": "(^MergeOverride$)|(^MergeRetain$)|(^Override$)", + "documentation": "\nA policy that determines how Elastic Transcoder handles the existence of multiple captions.
\nMergeOverride: Elastic Transcoder transcodes both embedded\n and sidecar captions into outputs. If captions for a language are embedded in the input\n file and also appear in a sidecar file, Elastic Transcoder uses the sidecar captions and ignores the\n embedded captions for that language.
MergeRetain: Elastic Transcoder transcodes both embedded\n and sidecar captions into outputs. If captions for a language are embedded in the input\n file and also appear in a sidecar file, Elastic Transcoder uses the embedded captions and ignores the\n sidecar captions for that language. If
Override: Elastic Transcoder transcodes only the\n sidecar captions that you specify in CaptionSources
.
MergePolicy
cannot be null.
The name of the sidecar caption file that you want Elastic Transcoder to include in the\n output file.
\n " + }, + "Language": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\nA string that specifies the language of the caption. Specify this as one of:
\n2-character ISO 639-1 code
3-character ISO 639-2 code
For more information on ISO language codes and language names, see the List of ISO \n 639-1 codes.
\n " + }, + "TimeOffset": { + "shape_name": "TimeOffset", + "type": "string", + "pattern": "(^[+-]?\\d{1,5}(\\.\\d{0,3})?$)|(^[+-]?([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", + "documentation": "\nFor clip generation or captions that do not start at the same time\n as the associated video file, the TimeOffset
tells Elastic Transcoder how much of the video to encode\n before including captions.
Specify the TimeOffset in the form [+-]SS.sss or [+-]HH:mm:SS.ss.
\n " + }, + "Label": { + "shape_name": "Name", + "type": "string", + "min_length": 1, + "max_length": 40, + "documentation": "\nThe label of the caption shown in the player when choosing\n a language. We recommend that you put the caption language name here, \n in the language of the captions.
\n " + } + }, + "documentation": "\nA source file for the input sidecar captions used during the transcoding\n process.
\n " + }, + "max_length": 20, + "documentation": "\nSource files for the input sidecar captions used during the transcoding\n process. To omit all sidecar captions, leave CaptionSources
blank.
The format you specify determines whether Elastic Transcoder generates an\n embedded or sidecar caption for this output.
\nValid Embedded Caption Formats:
\nFor MP3: None
For MP4: mov-text
For MPEG-TS: None
For ogg: None
For webm: None
Valid Sidecar Caption Formats: Elastic Transcoder\n supports dfxp (first div element only), scc, srt, and webvtt. There are\n no container restrictions on sidecar captions. If you want ttml or smpte-tt\n compatible captions, specify dfxp as your output format.
\nThe prefix for caption filenames, in the form description-{language}
, where:
{language}
is a literal value that Elastic Transcoder replaces with the two- or three-letter\n code for the language of the caption in the output file names.If you don't include {language}
in the file name pattern, Elastic Transcoder automatically\n appends \"{language}
\" to the value that you specify for the description. In addition,\n Elastic Transcoder automatically appends the count to the end of the segment files.
For example, suppose you're transcoding into srt format. When you enter\n \"Sydney-{language}-sunrise\", and the language of the captions is English\n (en), the name of the first caption file will be Sydney-en-sunrise00000.srt.
\n " + } + }, + "documentation": "\nThe file format of the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.
\n " + }, + "max_length": 4, + "documentation": "\nThe array of file formats for the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.
\n " + } + }, + "documentation": "\nYou can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another.\n All captions must be in UTF-8. Elastic Transcoder supports two types of captions:
\nEmbedded: Embedded captions\n are included in the same file as the audio and video. Elastic Transcoder supports\n only one embedded caption per language, to a maximum of 300 embedded captions per file.
\nValid input values include: CEA-608 (EIA-608
, first non-empty channel only),\n CEA-708 (EIA-708
, first non-empty channel only), and mov-text
Valid outputs include: mov-text
Elastic Transcoder supports a maximum of one embedded format per output.
\nSidecar: Sidecar captions are kept in a\n separate metadata file from the audio and video data. Sidecar captions require a player\n that is capable of understanding the relationship between the video file and the sidecar\n file. Elastic Transcoder supports only one sidecar caption per language, to a maximum of 20 sidecar\n captions per file.
\nValid input values include: dfxp
(first div element only),\n ebu-tt
, scc
, smpt
, srt
,\n ttml
(first div element only), and webvtt
Valid outputs include: dfxp
(first div element only), scc
,\n srt
, and webvtt
.
If you want ttml or smpte-tt compatible captions, specify dfxp as your output format.
\nElastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures\n as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does\n not preserve text formatting (for example, italics) during the transcoding process.
\nTo remove captions or leave the captions empty, set Captions
to null. To pass through\n existing captions unchanged, set the MergePolicy
to MergeRetain
,\n and pass in a null CaptionSources
array.
For more information on embedded files, see the Subtitles Wikipedia page.
\nFor more information on sidecar files, see the Extensible Metadata Platform and Sidecar file\n Wikipedia pages.
\n " } }, "documentation": "\nIf you specified one output for a job, information about that output. If you specified\n multiple outputs for a job, the Output object lists information about the first output.\n This duplicates the information that is listed for the first output in the Outputs\n object.
\nThe duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.
\nIf you specify a value longer than the duration of the input file , Elastic Transcoder transcodes \n the file and returns a warning message.
\n " + "documentation": "\nThe duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.
\nIf you specify a value longer than the duration of the input file, Elastic Transcoder transcodes \n the file and returns a warning message.
\n " } }, "documentation": "\nSettings that determine when a clip begins and how long it lasts.
\n " @@ -2832,7 +3233,85 @@ }, "documentation": "\nSettings for one clip in a composition. All jobs in a playlist must have the same clip settings.
\n " }, - "documentation": "\nYou can create an output file that contains an excerpt from the input file. This excerpt, called a clip, can come from the beginning, middle, or end of the file. The Composition object contains settings for the clips that make up an output file. For the current release, you can only specify settings for a single clip per output file. The Composition object cannot be null.
\n " + "documentation": "\nYou can create an output file that contains an excerpt from the input file. This \n excerpt, called a clip, can come from the beginning, middle, or end of the file. \n The Composition object contains settings for the clips that make up an output \n file. For the current release, you can only specify settings for a single clip \n per output file. The Composition object cannot be null.
\n " + }, + "Captions": { + "shape_name": "Captions", + "type": "structure", + "members": { + "MergePolicy": { + "shape_name": "CaptionMergePolicy", + "type": "string", + "pattern": "(^MergeOverride$)|(^MergeRetain$)|(^Override$)", + "documentation": "\nA policy that determines how Elastic Transcoder handles the existence of multiple captions.
\nMergeOverride: Elastic Transcoder transcodes both embedded\n and sidecar captions into outputs. If captions for a language are embedded in the input\n file and also appear in a sidecar file, Elastic Transcoder uses the sidecar captions and ignores the\n embedded captions for that language.
MergeRetain: Elastic Transcoder transcodes both embedded\n and sidecar captions into outputs. If captions for a language are embedded in the input\n file and also appear in a sidecar file, Elastic Transcoder uses the embedded captions and ignores the\n sidecar captions for that language. If
Override: Elastic Transcoder transcodes only the\n sidecar captions that you specify in CaptionSources
.
MergePolicy
cannot be null.
The name of the sidecar caption file that you want Elastic Transcoder to include in the\n output file.
\n " + }, + "Language": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\nA string that specifies the language of the caption. Specify this as one of:
\n2-character ISO 639-1 code
3-character ISO 639-2 code
For more information on ISO language codes and language names, see the List of ISO \n 639-1 codes.
\n " + }, + "TimeOffset": { + "shape_name": "TimeOffset", + "type": "string", + "pattern": "(^[+-]?\\d{1,5}(\\.\\d{0,3})?$)|(^[+-]?([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", + "documentation": "\nFor clip generation or captions that do not start at the same time\n as the associated video file, the TimeOffset
tells Elastic Transcoder how much of the video to encode\n before including captions.
Specify the TimeOffset in the form [+-]SS.sss or [+-]HH:mm:SS.ss.
\n " + }, + "Label": { + "shape_name": "Name", + "type": "string", + "min_length": 1, + "max_length": 40, + "documentation": "\nThe label of the caption shown in the player when choosing\n a language. We recommend that you put the caption language name here, \n in the language of the captions.
\n " + } + }, + "documentation": "\nA source file for the input sidecar captions used during the transcoding\n process.
\n " + }, + "max_length": 20, + "documentation": "\nSource files for the input sidecar captions used during the transcoding\n process. To omit all sidecar captions, leave CaptionSources
blank.
The format you specify determines whether Elastic Transcoder generates an\n embedded or sidecar caption for this output.
\nValid Embedded Caption Formats:
\nFor MP3: None
For MP4: mov-text
For MPEG-TS: None
For ogg: None
For webm: None
Valid Sidecar Caption Formats: Elastic Transcoder\n supports dfxp (first div element only), scc, srt, and webvtt. There are\n no container restrictions on sidecar captions. If you want ttml or smpte-tt\n compatible captions, specify dfxp as your output format.
\nThe prefix for caption filenames, in the form description-{language}
, where:
{language}
is a literal value that Elastic Transcoder replaces with the two- or three-letter\n code for the language of the caption in the output file names.If you don't include {language}
in the file name pattern, Elastic Transcoder automatically\n appends \"{language}
\" to the value that you specify for the description. In addition,\n Elastic Transcoder automatically appends the count to the end of the segment files.
For example, suppose you're transcoding into srt format. When you enter\n \"Sydney-{language}-sunrise\", and the language of the captions is English\n (en), the name of the first caption file will be Sydney-en-sunrise00000.srt.
\n " + } + }, + "documentation": "\nThe file format of the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.
\n " + }, + "max_length": 4, + "documentation": "\nThe array of file formats for the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.
\n " + } + }, + "documentation": "\nYou can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another.\n All captions must be in UTF-8. Elastic Transcoder supports two types of captions:
\nEmbedded: Embedded captions\n are included in the same file as the audio and video. Elastic Transcoder supports\n only one embedded caption per language, to a maximum of 300 embedded captions per file.
\nValid input values include: CEA-608 (EIA-608
, first non-empty channel only),\n CEA-708 (EIA-708
, first non-empty channel only), and mov-text
Valid outputs include: mov-text
Elastic Transcoder supports a maximum of one embedded format per output.
\nSidecar: Sidecar captions are kept in a\n separate metadata file from the audio and video data. Sidecar captions require a player\n that is capable of understanding the relationship between the video file and the sidecar\n file. Elastic Transcoder supports only one sidecar caption per language, to a maximum of 20 sidecar\n captions per file.
\nValid input values include: dfxp
(first div element only),\n ebu-tt
, scc
, smpt
, srt
,\n ttml
(first div element only), and webvtt
Valid outputs include: dfxp
(first div element only), scc
,\n srt
, and webvtt
.
If you want ttml or smpte-tt compatible captions, specify dfxp as your output format.
\nElastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures\n as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does\n not preserve text formatting (for example, italics) during the transcoding process.
\nTo remove captions or leave the captions empty, set Captions
to null. To pass through\n existing captions unchanged, set the MergePolicy
to MergeRetain
,\n and pass in a null CaptionSources
array.
For more information on embedded files, see the Subtitles Wikipedia page.
\nFor more information on sidecar files, see the Extensible Metadata Platform and Sidecar file\n Wikipedia pages.
\n " } }, "documentation": "\nOutput
object lists information about the first output. This duplicates\n the information that is listed for the first output in the Outputs
\n object.
To get information about all of the jobs associated with the current AWS account that\n have a given status, specify the following status: Submitted
,\n Progressing
, Complete
, Canceled
, or\n Error
.
The duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.
\nIf you specify a value longer than the duration of the input file , Elastic Transcoder transcodes \n the file and returns a warning message.
\n " + "documentation": "\nThe duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.
\nIf you specify a value longer than the duration of the input file, Elastic Transcoder transcodes \n the file and returns a warning message.
\n " } }, "documentation": "\nSettings that determine when a clip begins and how long it lasts.
\n " @@ -3249,7 +3729,85 @@ }, "documentation": "\nSettings for one clip in a composition. All jobs in a playlist must have the same clip settings.
\n " }, - "documentation": "\nYou can create an output file that contains an excerpt from the input file. This excerpt, called a clip, can come from the beginning, middle, or end of the file. The Composition object contains settings for the clips that make up an output file. For the current release, you can only specify settings for a single clip per output file. The Composition object cannot be null.
\n " + "documentation": "\nYou can create an output file that contains an excerpt from the input file. This \n excerpt, called a clip, can come from the beginning, middle, or end of the file. \n The Composition object contains settings for the clips that make up an output \n file. For the current release, you can only specify settings for a single clip \n per output file. The Composition object cannot be null.
\n " + }, + "Captions": { + "shape_name": "Captions", + "type": "structure", + "members": { + "MergePolicy": { + "shape_name": "CaptionMergePolicy", + "type": "string", + "pattern": "(^MergeOverride$)|(^MergeRetain$)|(^Override$)", + "documentation": "\nA policy that determines how Elastic Transcoder handles the existence of multiple captions.
\nMergeOverride: Elastic Transcoder transcodes both embedded\n and sidecar captions into outputs. If captions for a language are embedded in the input\n file and also appear in a sidecar file, Elastic Transcoder uses the sidecar captions and ignores the\n embedded captions for that language.
MergeRetain: Elastic Transcoder transcodes both embedded\n and sidecar captions into outputs. If captions for a language are embedded in the input\n file and also appear in a sidecar file, Elastic Transcoder uses the embedded captions and ignores the\n sidecar captions for that language. If
Override: Elastic Transcoder transcodes only the\n sidecar captions that you specify in CaptionSources
.
MergePolicy
cannot be null.
The name of the sidecar caption file that you want Elastic Transcoder to include in the\n output file.
\n " + }, + "Language": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\nA string that specifies the language of the caption. Specify this as one of:
\n2-character ISO 639-1 code
3-character ISO 639-2 code
For more information on ISO language codes and language names, see the List of ISO \n 639-1 codes.
\n " + }, + "TimeOffset": { + "shape_name": "TimeOffset", + "type": "string", + "pattern": "(^[+-]?\\d{1,5}(\\.\\d{0,3})?$)|(^[+-]?([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", + "documentation": "\nFor clip generation or captions that do not start at the same time\n as the associated video file, the TimeOffset
tells Elastic Transcoder how much of the video to encode\n before including captions.
Specify the TimeOffset in the form [+-]SS.sss or [+-]HH:mm:SS.ss.
\n " + }, + "Label": { + "shape_name": "Name", + "type": "string", + "min_length": 1, + "max_length": 40, + "documentation": "\nThe label of the caption shown in the player when choosing\n a language. We recommend that you put the caption language name here, \n in the language of the captions.
\n " + } + }, + "documentation": "\nA source file for the input sidecar captions used during the transcoding\n process.
\n " + }, + "max_length": 20, + "documentation": "\nSource files for the input sidecar captions used during the transcoding\n process. To omit all sidecar captions, leave CaptionSources
blank.
The format you specify determines whether Elastic Transcoder generates an\n embedded or sidecar caption for this output.
\nValid Embedded Caption Formats:
\nFor MP3: None
For MP4: mov-text
For MPEG-TS: None
For ogg: None
For webm: None
Valid Sidecar Caption Formats: Elastic Transcoder\n supports dfxp (first div element only), scc, srt, and webvtt. There are\n no container restrictions on sidecar captions. If you want ttml or smpte-tt\n compatible captions, specify dfxp as your output format.
\nThe prefix for caption filenames, in the form description-{language}
, where:
{language}
is a literal value that Elastic Transcoder replaces with the two- or three-letter\n code for the language of the caption in the output file names.If you don't include {language}
in the file name pattern, Elastic Transcoder automatically\n appends \"{language}
\" to the value that you specify for the description. In addition,\n Elastic Transcoder automatically appends the count to the end of the segment files.
For example, suppose you're transcoding into srt format. When you enter\n \"Sydney-{language}-sunrise\", and the language of the captions is English\n (en), the name of the first caption file will be Sydney-en-sunrise00000.srt.
\n " + } + }, + "documentation": "\nThe file format of the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.
\n " + }, + "max_length": 4, + "documentation": "\nThe array of file formats for the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.
\n " + } + }, + "documentation": "\nYou can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another.\n All captions must be in UTF-8. Elastic Transcoder supports two types of captions:
\nEmbedded: Embedded captions\n are included in the same file as the audio and video. Elastic Transcoder supports\n only one embedded caption per language, to a maximum of 300 embedded captions per file.
\nValid input values include: CEA-608 (EIA-608
, first non-empty channel only),\n CEA-708 (EIA-708
, first non-empty channel only), and mov-text
Valid outputs include: mov-text
Elastic Transcoder supports a maximum of one embedded format per output.
\nSidecar: Sidecar captions are kept in a\n separate metadata file from the audio and video data. Sidecar captions require a player\n that is capable of understanding the relationship between the video file and the sidecar\n file. Elastic Transcoder supports only one sidecar caption per language, to a maximum of 20 sidecar\n captions per file.
\nValid input values include: dfxp
(first div element only),\n ebu-tt
, scc
, smpt
, srt
,\n ttml
(first div element only), and webvtt
Valid outputs include: dfxp
(first div element only), scc
,\n srt
, and webvtt
.
If you want ttml or smpte-tt compatible captions, specify dfxp as your output format.
\nElastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures\n as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does\n not preserve text formatting (for example, italics) during the transcoding process.
\nTo remove captions or leave the captions empty, set Captions
to null. To pass through\n existing captions unchanged, set the MergePolicy
to MergeRetain
,\n and pass in a null CaptionSources
array.
For more information on embedded files, see the Subtitles Wikipedia page.
\nFor more information on sidecar files, see the Extensible Metadata Platform and Sidecar file\n Wikipedia pages.
\n " } }, "documentation": "\nIf you specified one output for a job, information about that output. If you specified\n multiple outputs for a job, the Output object lists information about the first output.\n This duplicates the information that is listed for the first output in the Outputs\n object.
\nThe duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.
\nIf you specify a value longer than the duration of the input file , Elastic Transcoder transcodes \n the file and returns a warning message.
\n " + "documentation": "\nThe duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.
\nIf you specify a value longer than the duration of the input file, Elastic Transcoder transcodes \n the file and returns a warning message.
\n " } }, "documentation": "\nSettings that determine when a clip begins and how long it lasts.
\n " @@ -3444,7 +4002,85 @@ }, "documentation": "\nSettings for one clip in a composition. All jobs in a playlist must have the same clip settings.
\n " }, - "documentation": "\nYou can create an output file that contains an excerpt from the input file. This excerpt, called a clip, can come from the beginning, middle, or end of the file. The Composition object contains settings for the clips that make up an output file. For the current release, you can only specify settings for a single clip per output file. The Composition object cannot be null.
\n " + "documentation": "\nYou can create an output file that contains an excerpt from the input file. This \n excerpt, called a clip, can come from the beginning, middle, or end of the file. \n The Composition object contains settings for the clips that make up an output \n file. For the current release, you can only specify settings for a single clip \n per output file. The Composition object cannot be null.
\n " + }, + "Captions": { + "shape_name": "Captions", + "type": "structure", + "members": { + "MergePolicy": { + "shape_name": "CaptionMergePolicy", + "type": "string", + "pattern": "(^MergeOverride$)|(^MergeRetain$)|(^Override$)", + "documentation": "\nA policy that determines how Elastic Transcoder handles the existence of multiple captions.
\nMergeOverride: Elastic Transcoder transcodes both embedded\n and sidecar captions into outputs. If captions for a language are embedded in the input\n file and also appear in a sidecar file, Elastic Transcoder uses the sidecar captions and ignores the\n embedded captions for that language.
MergeRetain: Elastic Transcoder transcodes both embedded\n and sidecar captions into outputs. If captions for a language are embedded in the input\n file and also appear in a sidecar file, Elastic Transcoder uses the embedded captions and ignores the\n sidecar captions for that language. If
Override: Elastic Transcoder transcodes only the\n sidecar captions that you specify in CaptionSources
.
MergePolicy
cannot be null.
The name of the sidecar caption file that you want Elastic Transcoder to include in the\n output file.
\n " + }, + "Language": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\nA string that specifies the language of the caption. Specify this as one of:
\n2-character ISO 639-1 code
3-character ISO 639-2 code
For more information on ISO language codes and language names, see the List of ISO \n 639-1 codes.
\n " + }, + "TimeOffset": { + "shape_name": "TimeOffset", + "type": "string", + "pattern": "(^[+-]?\\d{1,5}(\\.\\d{0,3})?$)|(^[+-]?([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", + "documentation": "\nFor clip generation or captions that do not start at the same time\n as the associated video file, the TimeOffset
tells Elastic Transcoder how much of the video to encode\n before including captions.
Specify the TimeOffset in the form [+-]SS.sss or [+-]HH:mm:SS.ss.
\n " + }, + "Label": { + "shape_name": "Name", + "type": "string", + "min_length": 1, + "max_length": 40, + "documentation": "\nThe label of the caption shown in the player when choosing\n a language. We recommend that you put the caption language name here, \n in the language of the captions.
\n " + } + }, + "documentation": "\nA source file for the input sidecar captions used during the transcoding\n process.
\n " + }, + "max_length": 20, + "documentation": "\nSource files for the input sidecar captions used during the transcoding\n process. To omit all sidecar captions, leave CaptionSources
blank.
The format you specify determines whether Elastic Transcoder generates an\n embedded or sidecar caption for this output.
\nValid Embedded Caption Formats:
\nFor MP3: None
For MP4: mov-text
For MPEG-TS: None
For ogg: None
For webm: None
Valid Sidecar Caption Formats: Elastic Transcoder\n supports dfxp (first div element only), scc, srt, and webvtt. There are\n no container restrictions on sidecar captions. If you want ttml or smpte-tt\n compatible captions, specify dfxp as your output format.
\nThe prefix for caption filenames, in the form description-{language}
, where:
{language}
is a literal value that Elastic Transcoder replaces with the two- or three-letter\n code for the language of the caption in the output file names.If you don't include {language}
in the file name pattern, Elastic Transcoder automatically\n appends \"{language}
\" to the value that you specify for the description. In addition,\n Elastic Transcoder automatically appends the count to the end of the segment files.
For example, suppose you're transcoding into srt format. When you enter\n \"Sydney-{language}-sunrise\", and the language of the captions is English\n (en), the name of the first caption file will be Sydney-en-sunrise00000.srt.
\n " + } + }, + "documentation": "\nThe file format of the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.
\n " + }, + "max_length": 4, + "documentation": "\nThe array of file formats for the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.
\n " + } + }, + "documentation": "\nYou can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another.\n All captions must be in UTF-8. Elastic Transcoder supports two types of captions:
\nEmbedded: Embedded captions\n are included in the same file as the audio and video. Elastic Transcoder supports\n only one embedded caption per language, to a maximum of 300 embedded captions per file.
\nValid input values include: CEA-608 (EIA-608
, first non-empty channel only),\n CEA-708 (EIA-708
, first non-empty channel only), and mov-text
Valid outputs include: mov-text
Elastic Transcoder supports a maximum of one embedded format per output.
\nSidecar: Sidecar captions are kept in a\n separate metadata file from the audio and video data. Sidecar captions require a player\n that is capable of understanding the relationship between the video file and the sidecar\n file. Elastic Transcoder supports only one sidecar caption per language, to a maximum of 20 sidecar\n captions per file.
\nValid input values include: dfxp
(first div element only),\n ebu-tt
, scc
, smpt
, srt
,\n ttml
(first div element only), and webvtt
Valid outputs include: dfxp
(first div element only), scc
,\n srt
, and webvtt
.
If you want ttml or smpte-tt compatible captions, specify dfxp as your output format.
\nElastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures\n as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does\n not preserve text formatting (for example, italics) during the transcoding process.
\nTo remove captions or leave the captions empty, set Captions
to null. To pass through\n existing captions unchanged, set the MergePolicy
to MergeRetain
,\n and pass in a null CaptionSources
array.
For more information on embedded files, see the Subtitles Wikipedia page.
\nFor more information on sidecar files, see the Extensible Metadata Platform and Sidecar file\n Wikipedia pages.
\n " } }, "documentation": "\nOutput
object lists information about the first output. This duplicates\n the information that is listed for the first output in the Outputs
\n object.
If you specified AAC
for Audio:Codec
, choose the AAC profile for the output file.\n Elastic Transcoder supports the following profiles:
auto
: If you specify auto
, Elastic Transcoder will select\n the profile based on the bit rate selected for the output file.AAC-LC
: The most common AAC profile. Use for bitrates larger than\n 64 kbps.HE-AAC
: Not supported on some older players and devices.\n Use for bitrates between 40 and 80 kbps.HE-AACv2
: Not supported on some players and devices.\n Use for bitrates less than 48 kbps.If you created any presets before AAC profiles were added, Elastic Transcoder automatically updated\n your presets to use AAC-LC. You can change the value as required.
You can only choose an audio profile when you specify AAC for the value of Audio:Codec.
\nSpecify the AAC profile for the output file. Elastic Transcoder supports the following profiles:
\nauto
: If you specify auto
, Elastic Transcoder will select\n the profile based on the bit rate selected for the output file.AAC-LC
: The most common AAC profile. Use for bitrates larger than\n 64 kbps.HE-AAC
: Not supported on some older players and devices.\n Use for bitrates between 40 and 80 kbps.HE-AACv2
: Not supported on some players and devices.\n Use for bitrates less than 48 kbps.If you created any presets before AAC profiles were added, Elastic Transcoder automatically updated\n your presets to use AAC-LC. You can change the value as required.
If you specified AAC
for Audio:Codec
, this is the AAC
\n compression profile to use. Valid values include:
auto
, AAC-LC
, HE-AAC
, HE-AACv2
If you specify auto
, Elastic Transcoder chooses a profile based on the bit rate of the output file.
The maximum width of the watermark in one of the following formats:
MaxWidth
.Target
to specify whether you want Elastic Transcoder to include the black\n bars that are added by Elastic Transcoder, if any, in the calculation.MaxWidth
.The maximum height of the watermark in one of the following formats:
MaxHeight
.Target
to specify whether you want Elastic Transcoder to include the black\n bars that are added by Elastic Transcoder, if any, in the calculation.MaxHeight
.\n "
},
"SizingPolicy": {
@@ -4095,7 +4731,7 @@
"HorizontalOffset": {
"shape_name": "PixelsOrPercent",
"type": "string",
- "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{2,4}?px$)",
+ "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{1,4}?px$)",
"documentation": "\n The amount by which you want the horizontal position of the watermark to be offset from\n the position specified by HorizontalAlign:
HorizontalAlign
and 5px for\n HorizontalOffset
, the left side of the watermark appears 5 pixels from\n the left border of the output video.\n HorizontalOffset
is only valid when the value of\n HorizontalAlign
is Left
or Right
. If you\n specify an offset that causes the watermark to extend beyond the left or right border\n and Elastic Transcoder has not added black bars, the watermark is cropped. If Elastic\n Transcoder has added black bars, the watermark extends into the black bars. If the\n watermark extends beyond the black bars, it is cropped.
Use the value of Target
to specify whether you want to include the black\n bars that are added by Elastic Transcoder, if any, in the offset calculation.
VerticalOffset
\n The amount by which you want the vertical position of the watermark to be offset from the\n position specified by VerticalAlign:
MaxHeight
.Top
for VerticalAlign
and\n 5px
for VerticalOffset
, the top of the watermark appears 5\n pixels from the top border of the output video.\n VerticalOffset
is only valid when the value of VerticalAlign is Top or\n Bottom.
If you specify an offset that causes the watermark to extend beyond the top or bottom\n border and Elastic Transcoder has not added black bars, the watermark is cropped. If\n Elastic Transcoder has added black bars, the watermark extends into the black bars. If\n the watermark extends beyond the black bars, it is cropped.
\n\nUse the value of Target
to specify whether you want Elastic Transcoder to\n include the black bars that are added by Elastic Transcoder, if any, in the offset\n calculation.
The identifier of the job for which you want to get detailed information.
\n ", + "required": true, "location": "uri" } }, @@ -4505,7 +5142,7 @@ "shape_name": "Time", "type": "string", "pattern": "(^\\d{1,5}(\\.\\d{0,3})?$)|(^([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", - "documentation": "\nThe duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.
\nIf you specify a value longer than the duration of the input file , Elastic Transcoder transcodes \n the file and returns a warning message.
\n " + "documentation": "\nThe duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.
\nIf you specify a value longer than the duration of the input file, Elastic Transcoder transcodes \n the file and returns a warning message.
\n " } }, "documentation": "\nSettings that determine when a clip begins and how long it lasts.
\n " @@ -4513,7 +5150,85 @@ }, "documentation": "\nSettings for one clip in a composition. All jobs in a playlist must have the same clip settings.
\n " }, - "documentation": "\nYou can create an output file that contains an excerpt from the input file. This excerpt, called a clip, can come from the beginning, middle, or end of the file. The Composition object contains settings for the clips that make up an output file. For the current release, you can only specify settings for a single clip per output file. The Composition object cannot be null.
\n " + "documentation": "\nYou can create an output file that contains an excerpt from the input file. This \n excerpt, called a clip, can come from the beginning, middle, or end of the file. \n The Composition object contains settings for the clips that make up an output \n file. For the current release, you can only specify settings for a single clip \n per output file. The Composition object cannot be null.
\n " + }, + "Captions": { + "shape_name": "Captions", + "type": "structure", + "members": { + "MergePolicy": { + "shape_name": "CaptionMergePolicy", + "type": "string", + "pattern": "(^MergeOverride$)|(^MergeRetain$)|(^Override$)", + "documentation": "\nA policy that determines how Elastic Transcoder handles the existence of multiple captions.
\nMergeOverride: Elastic Transcoder transcodes both embedded\n and sidecar captions into outputs. If captions for a language are embedded in the input\n file and also appear in a sidecar file, Elastic Transcoder uses the sidecar captions and ignores the\n embedded captions for that language.
MergeRetain: Elastic Transcoder transcodes both embedded\n and sidecar captions into outputs. If captions for a language are embedded in the input\n file and also appear in a sidecar file, Elastic Transcoder uses the embedded captions and ignores the\n sidecar captions for that language. If
Override: Elastic Transcoder transcodes only the\n sidecar captions that you specify in CaptionSources
.
MergePolicy
cannot be null.
The name of the sidecar caption file that you want Elastic Transcoder to include in the\n output file.
\n " + }, + "Language": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\nA string that specifies the language of the caption. Specify this as one of:
\n2-character ISO 639-1 code
3-character ISO 639-2 code
For more information on ISO language codes and language names, see the List of ISO \n 639-1 codes.
\n " + }, + "TimeOffset": { + "shape_name": "TimeOffset", + "type": "string", + "pattern": "(^[+-]?\\d{1,5}(\\.\\d{0,3})?$)|(^[+-]?([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", + "documentation": "\nFor clip generation or captions that do not start at the same time\n as the associated video file, the TimeOffset
tells Elastic Transcoder how much of the video to encode\n before including captions.
Specify the TimeOffset in the form [+-]SS.sss or [+-]HH:mm:SS.ss.
\n " + }, + "Label": { + "shape_name": "Name", + "type": "string", + "min_length": 1, + "max_length": 40, + "documentation": "\nThe label of the caption shown in the player when choosing\n a language. We recommend that you put the caption language name here, \n in the language of the captions.
\n " + } + }, + "documentation": "\nA source file for the input sidecar captions used during the transcoding\n process.
\n " + }, + "max_length": 20, + "documentation": "\nSource files for the input sidecar captions used during the transcoding\n process. To omit all sidecar captions, leave CaptionSources
blank.
The format you specify determines whether Elastic Transcoder generates an\n embedded or sidecar caption for this output.
\nValid Embedded Caption Formats:
\nFor MP3: None
For MP4: mov-text
For MPEG-TS: None
For ogg: None
For webm: None
Valid Sidecar Caption Formats: Elastic Transcoder\n supports dfxp (first div element only), scc, srt, and webvtt. There are\n no container restrictions on sidecar captions. If you want ttml or smpte-tt\n compatible captions, specify dfxp as your output format.
\nThe prefix for caption filenames, in the form description-{language}
, where:
{language}
is a literal value that Elastic Transcoder replaces with the two- or three-letter\n code for the language of the caption in the output file names.If you don't include {language}
in the file name pattern, Elastic Transcoder automatically\n appends \"{language}
\" to the value that you specify for the description. In addition,\n Elastic Transcoder automatically appends the count to the end of the segment files.
For example, suppose you're transcoding into srt format. When you enter\n \"Sydney-{language}-sunrise\", and the language of the captions is English\n (en), the name of the first caption file will be Sydney-en-sunrise00000.srt.
\n " + } + }, + "documentation": "\nThe file format of the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.
\n " + }, + "max_length": 4, + "documentation": "\nThe array of file formats for the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.
\n " + } + }, + "documentation": "\nYou can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another.\n All captions must be in UTF-8. Elastic Transcoder supports two types of captions:
\nEmbedded: Embedded captions\n are included in the same file as the audio and video. Elastic Transcoder supports\n only one embedded caption per language, to a maximum of 300 embedded captions per file.
\nValid input values include: CEA-608 (EIA-608
, first non-empty channel only),\n CEA-708 (EIA-708
, first non-empty channel only), and mov-text
Valid outputs include: mov-text
Elastic Transcoder supports a maximum of one embedded format per output.
\nSidecar: Sidecar captions are kept in a\n separate metadata file from the audio and video data. Sidecar captions require a player\n that is capable of understanding the relationship between the video file and the sidecar\n file. Elastic Transcoder supports only one sidecar caption per language, to a maximum of 20 sidecar\n captions per file.
\nValid input values include: dfxp
(first div element only),\n ebu-tt
, scc
, smpt
, srt
,\n ttml
(first div element only), and webvtt
Valid outputs include: dfxp
(first div element only), scc
,\n srt
, and webvtt
.
If you want ttml or smpte-tt compatible captions, specify dfxp as your output format.
\nElastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures\n as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does\n not preserve text formatting (for example, italics) during the transcoding process.
\nTo remove captions or leave the captions empty, set Captions
to null. To pass through\n existing captions unchanged, set the MergePolicy
to MergeRetain
,\n and pass in a null CaptionSources
array.
For more information on embedded files, see the Subtitles Wikipedia page.
\nFor more information on sidecar files, see the Extensible Metadata Platform and Sidecar file\n Wikipedia pages.
\n " } }, "documentation": "\nIf you specified one output for a job, information about that output. If you specified\n multiple outputs for a job, the Output object lists information about the first output.\n This duplicates the information that is listed for the first output in the Outputs\n object.
\nThe duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.
\nIf you specify a value longer than the duration of the input file , Elastic Transcoder transcodes \n the file and returns a warning message.
\n " + "documentation": "\nThe duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.
\nIf you specify a value longer than the duration of the input file, Elastic Transcoder transcodes \n the file and returns a warning message.
\n " } }, "documentation": "\nSettings that determine when a clip begins and how long it lasts.
\n " @@ -4708,7 +5423,85 @@ }, "documentation": "\nSettings for one clip in a composition. All jobs in a playlist must have the same clip settings.
\n " }, - "documentation": "\nYou can create an output file that contains an excerpt from the input file. This excerpt, called a clip, can come from the beginning, middle, or end of the file. The Composition object contains settings for the clips that make up an output file. For the current release, you can only specify settings for a single clip per output file. The Composition object cannot be null.
\n " + "documentation": "\nYou can create an output file that contains an excerpt from the input file. This \n excerpt, called a clip, can come from the beginning, middle, or end of the file. \n The Composition object contains settings for the clips that make up an output \n file. For the current release, you can only specify settings for a single clip \n per output file. The Composition object cannot be null.
\n " + }, + "Captions": { + "shape_name": "Captions", + "type": "structure", + "members": { + "MergePolicy": { + "shape_name": "CaptionMergePolicy", + "type": "string", + "pattern": "(^MergeOverride$)|(^MergeRetain$)|(^Override$)", + "documentation": "\nA policy that determines how Elastic Transcoder handles the existence of multiple captions.
\nMergeOverride: Elastic Transcoder transcodes both embedded\n and sidecar captions into outputs. If captions for a language are embedded in the input\n file and also appear in a sidecar file, Elastic Transcoder uses the sidecar captions and ignores the\n embedded captions for that language.
MergeRetain: Elastic Transcoder transcodes both embedded\n and sidecar captions into outputs. If captions for a language are embedded in the input\n file and also appear in a sidecar file, Elastic Transcoder uses the embedded captions and ignores the\n sidecar captions for that language. If
Override: Elastic Transcoder transcodes only the\n sidecar captions that you specify in CaptionSources
.
MergePolicy
cannot be null.
The name of the sidecar caption file that you want Elastic Transcoder to include in the\n output file.
\n " + }, + "Language": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\nA string that specifies the language of the caption. Specify this as one of:
\n2-character ISO 639-1 code
3-character ISO 639-2 code
For more information on ISO language codes and language names, see the List of ISO \n 639-1 codes.
\n " + }, + "TimeOffset": { + "shape_name": "TimeOffset", + "type": "string", + "pattern": "(^[+-]?\\d{1,5}(\\.\\d{0,3})?$)|(^[+-]?([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", + "documentation": "\nFor clip generation or captions that do not start at the same time\n as the associated video file, the TimeOffset
tells Elastic Transcoder how much of the video to encode\n before including captions.
Specify the TimeOffset in the form [+-]SS.sss or [+-]HH:mm:SS.ss.
\n " + }, + "Label": { + "shape_name": "Name", + "type": "string", + "min_length": 1, + "max_length": 40, + "documentation": "\nThe label of the caption shown in the player when choosing\n a language. We recommend that you put the caption language name here, \n in the language of the captions.
\n " + } + }, + "documentation": "\nA source file for the input sidecar captions used during the transcoding\n process.
\n " + }, + "max_length": 20, + "documentation": "\nSource files for the input sidecar captions used during the transcoding\n process. To omit all sidecar captions, leave CaptionSources
blank.
The format you specify determines whether Elastic Transcoder generates an\n embedded or sidecar caption for this output.
\nValid Embedded Caption Formats:
\nFor MP3: None
For MP4: mov-text
For MPEG-TS: None
For ogg: None
For webm: None
Valid Sidecar Caption Formats: Elastic Transcoder\n supports dfxp (first div element only), scc, srt, and webvtt. There are\n no container restrictions on sidecar captions. If you want ttml or smpte-tt\n compatible captions, specify dfxp as your output format.
\nThe prefix for caption filenames, in the form description-{language}
, where:
{language}
is a literal value that Elastic Transcoder replaces with the two- or three-letter\n code for the language of the caption in the output file names.If you don't include {language}
in the file name pattern, Elastic Transcoder automatically\n appends \"{language}
\" to the value that you specify for the description. In addition,\n Elastic Transcoder automatically appends the count to the end of the segment files.
For example, suppose you're transcoding into srt format. When you enter\n \"Sydney-{language}-sunrise\", and the language of the captions is English\n (en), the name of the first caption file will be Sydney-en-sunrise00000.srt.
\n " + } + }, + "documentation": "\nThe file format of the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.
\n " + }, + "max_length": 4, + "documentation": "\nThe array of file formats for the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.
\n " + } + }, + "documentation": "\nYou can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another.\n All captions must be in UTF-8. Elastic Transcoder supports two types of captions:
\nEmbedded: Embedded captions\n are included in the same file as the audio and video. Elastic Transcoder supports\n only one embedded caption per language, to a maximum of 300 embedded captions per file.
\nValid input values include: CEA-608 (EIA-608
, first non-empty channel only),\n CEA-708 (EIA-708
, first non-empty channel only), and mov-text
Valid outputs include: mov-text
Elastic Transcoder supports a maximum of one embedded format per output.
\nSidecar: Sidecar captions are kept in a\n separate metadata file from the audio and video data. Sidecar captions require a player\n that is capable of understanding the relationship between the video file and the sidecar\n file. Elastic Transcoder supports only one sidecar caption per language, to a maximum of 20 sidecar\n captions per file.
\nValid input values include: dfxp
(first div element only),\n ebu-tt
, scc
, smpt
, srt
,\n ttml
(first div element only), and webvtt
Valid outputs include: dfxp
(first div element only), scc
,\n srt
, and webvtt
.
If you want ttml or smpte-tt compatible captions, specify dfxp as your output format.
\nElastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures\n as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does\n not preserve text formatting (for example, italics) during the transcoding process.
\nTo remove captions or leave the captions empty, set Captions
to null. To pass through\n existing captions unchanged, set the MergePolicy
to MergeRetain
,\n and pass in a null CaptionSources
array.
For more information on embedded files, see the Subtitles Wikipedia page.
\nFor more information on sidecar files, see the Extensible Metadata Platform and Sidecar file\n Wikipedia pages.
\n " } }, "documentation": "\nOutput
object lists information about the first output. This duplicates\n the information that is listed for the first output in the Outputs
\n object.
The identifier of the pipeline to read.
\n ", + "required": true, "location": "uri" } }, @@ -5099,6 +5893,7 @@ "type": "string", "pattern": "^\\d{13}-\\w{6}$", "documentation": "\nThe identifier of the preset for which you want to get detailed information.
\n ", + "required": true, "location": "uri" } }, @@ -5179,7 +5974,7 @@ "shape_name": "AudioCodecProfile", "type": "string", "pattern": "(^auto$)|(^AAC-LC$)|(^HE-AAC$)|(^HE-AACv2$)", - "documentation": "\nIf you specified AAC
for Audio:Codec
, choose the AAC profile for the output file.\n Elastic Transcoder supports the following profiles:
auto
: If you specify auto
, Elastic Transcoder will select\n the profile based on the bit rate selected for the output file.AAC-LC
: The most common AAC profile. Use for bitrates larger than\n 64 kbps.HE-AAC
: Not supported on some older players and devices.\n Use for bitrates between 40 and 80 kbps.HE-AACv2
: Not supported on some players and devices.\n Use for bitrates less than 48 kbps.If you created any presets before AAC profiles were added, Elastic Transcoder automatically updated\n your presets to use AAC-LC. You can change the value as required.
You can only choose an audio profile when you specify AAC for the value of Audio:Codec.
\nSpecify the AAC profile for the output file. Elastic Transcoder supports the following profiles:
\nauto
: If you specify auto
, Elastic Transcoder will select\n the profile based on the bit rate selected for the output file.AAC-LC
: The most common AAC profile. Use for bitrates larger than\n 64 kbps.HE-AAC
: Not supported on some older players and devices.\n Use for bitrates between 40 and 80 kbps.HE-AACv2
: Not supported on some players and devices.\n Use for bitrates less than 48 kbps.If you created any presets before AAC profiles were added, Elastic Transcoder automatically updated\n your presets to use AAC-LC. You can change the value as required.
If you specified AAC
for Audio:Codec
, this is the AAC
\n compression profile to use. Valid values include:
auto
, AAC-LC
, HE-AAC
, HE-AACv2
If you specify auto
, Elastic Transcoder chooses a profile based on the bit rate of the output file.
The maximum width of the watermark in one of the following formats:
MaxWidth
.Target
to specify whether you want Elastic Transcoder to include the black\n bars that are added by Elastic Transcoder, if any, in the calculation.MaxWidth
.The maximum height of the watermark in one of the following formats:
MaxHeight
.Target
to specify whether you want Elastic Transcoder to include the black\n bars that are added by Elastic Transcoder, if any, in the calculation.MaxHeight
.\n "
},
"SizingPolicy": {
@@ -5330,7 +6125,7 @@
"HorizontalOffset": {
"shape_name": "PixelsOrPercent",
"type": "string",
- "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{2,4}?px$)",
+ "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{1,4}?px$)",
"documentation": "\n The amount by which you want the horizontal position of the watermark to be offset from\n the position specified by HorizontalAlign:
HorizontalAlign
and 5px for\n HorizontalOffset
, the left side of the watermark appears 5 pixels from\n the left border of the output video.\n HorizontalOffset
is only valid when the value of\n HorizontalAlign
is Left
or Right
. If you\n specify an offset that causes the watermark to extend beyond the left or right border\n and Elastic Transcoder has not added black bars, the watermark is cropped. If Elastic\n Transcoder has added black bars, the watermark extends into the black bars. If the\n watermark extends beyond the black bars, it is cropped.
Use the value of Target
to specify whether you want to include the black\n bars that are added by Elastic Transcoder, if any, in the offset calculation.
VerticalOffset
\n The amount by which you want the vertical position of the watermark to be offset from the\n position specified by VerticalAlign:
MaxHeight
.Top
for VerticalAlign
and\n 5px
for VerticalOffset
, the top of the watermark appears 5\n pixels from the top border of the output video.\n VerticalOffset
is only valid when the value of VerticalAlign is Top or\n Bottom.
If you specify an offset that causes the watermark to extend beyond the top or bottom\n border and Elastic Transcoder has not added black bars, the watermark is cropped. If\n Elastic Transcoder has added black bars, the watermark extends into the black bars. If\n the watermark extends beyond the black bars, it is cropped.
\n\nUse the value of Target
to specify whether you want Elastic Transcoder to\n include the black bars that are added by Elastic Transcoder, if any, in the offset\n calculation.
The IAM Amazon Resource Name (ARN) for the role that you want Elastic Transcoder to\n test.
\n " + "documentation": "\nThe IAM Amazon Resource Name (ARN) for the role that you want Elastic Transcoder to\n test.
\n ", + "required": true }, "InputBucket": { "shape_name": "BucketName", "type": "string", "pattern": "^(\\w|\\.|-){1,255}$", - "documentation": "\nThe Amazon S3 bucket that contains media files to be transcoded. The action attempts to read\n from this bucket.
\n " + "documentation": "\nThe Amazon S3 bucket that contains media files to be transcoded. The action attempts to read\n from this bucket.
\n ", + "required": true }, "OutputBucket": { "shape_name": "BucketName", "type": "string", "pattern": "^(\\w|\\.|-){1,255}$", - "documentation": "\nThe Amazon S3 bucket that Elastic Transcoder will write transcoded media files to. The action attempts to\n read from this bucket.
\n " + "documentation": "\nThe Amazon S3 bucket that Elastic Transcoder will write transcoded media files to. The action attempts to\n read from this bucket.
\n ", + "required": true }, "Topics": { "shape_name": "SnsTopics", @@ -5510,7 +6308,8 @@ "documentation": null }, "max_length": 30, - "documentation": "\nThe ARNs of one or more Amazon Simple Notification Service (Amazon SNS) topics that you want the action to send a test\n notification to.
\n " + "documentation": "\nThe ARNs of one or more Amazon Simple Notification Service (Amazon SNS) topics that you want the action to send a test\n notification to.
\n ", + "required": true } }, "documentation": "\n The TestRoleRequest
structure.
The identifier of the pipeline for which you want to change notification settings.
\n ", + "required": true, "location": "uri" }, "Notifications": { @@ -6057,7 +6857,8 @@ "documentation": "\nThe Amazon SNS topic that you want to notify when Elastic Transcoder encounters an error condition.
\n " } }, - "documentation": "\nThe topic ARN for the Amazon Simple Notification Service (Amazon SNS) topic that you want to notify to report job\n status.
\nThe topic ARN for the Amazon Simple Notification Service (Amazon SNS) topic that you want to notify to report job\n status.
\nThe UpdatePipelineNotificationsRequest
structure.
The identifier of the pipeline to update.
\n ", + "required": true, "location": "uri" }, "Status": { "shape_name": "PipelineStatus", "type": "string", "pattern": "(^Active$)|(^Paused$)", - "documentation": "\nThe desired status of the pipeline:
\nActive
: The pipeline is processing jobs.Paused
: The pipeline is not currently processing jobs.The desired status of the pipeline:
\nActive
: The pipeline is processing jobs.Paused
: The pipeline is not currently processing jobs.The UpdatePipelineStatusRequest
structure.
This is the Amazon Elastic MapReduce API Reference. This guide provides descriptions and\n samples of the Amazon Elastic MapReduce APIs.
\n\nAmazon Elastic MapReduce (Amazon EMR) is a web service that makes it easy to process large amounts of\n data efficiently. Amazon EMR uses Hadoop processing combined with several AWS\n products to do tasks such as web indexing, data mining, log file analysis, machine\n learning, scientific simulation, and data warehousing.
\n\n ", "operations": { "AddInstanceGroups": { @@ -143,7 +144,7 @@ "pattern": "[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*", "min_length": 0, "max_length": 256, - "documentation": "\nA string that uniquely identifies the job flow. This identifier is returned by\n RunJobFlow and can also be obtained from DescribeJobFlows.
\n ", + "documentation": "\nA string that uniquely identifies the job flow. This identifier is returned by\n RunJobFlow and can also be obtained from ListClusters.
\n ", "required": true }, "Steps": { @@ -242,7 +243,7 @@ }, "documentation": "\nSpecification of a job flow step.
\n " }, - "documentation": "\nA list of StepConfig to be executed by the job flow.
\n ", + "documentation": "\nA list of StepConfig to be executed by the job flow.
\n ", "required": true } }, @@ -266,7 +267,7 @@ "documentation": "\nThe identifiers of the list of steps added to the job flow.
\n " } }, - "documentation": "\nThe output for the AddJobFlowSteps operation.
\n " + "documentation": "\nThe output for the AddJobFlowSteps operation.
\n " }, "errors": [ { @@ -288,7 +289,8 @@ "ResourceId": { "shape_name": "ResourceId", "type": "string", - "documentation": "\nThe Amazon EMR resource identifier to which tags will be added. This value must be a cluster identifier.
\n " + "documentation": "\nThe Amazon EMR resource identifier to which tags will be added. This value must be a cluster identifier.
\n ", + "required": true }, "Tags": { "shape_name": "TagList", @@ -300,27 +302,28 @@ "Key": { "shape_name": "String", "type": "string", - "documentation": "\nA user-defined key, which is the minimum required information for a valid tag.\n For more information, see Tagging Amazon EMR Resources. \n
\n " + "documentation": "\nA user-defined key, which is the minimum required information for a valid tag. \n For more information, see Tagging Amazon EMR Resources. \n
\n " }, "Value": { "shape_name": "String", "type": "string", - "documentation": "\nA user-defined value, which is optional in a tag.\n For more information, see Tagging Amazon EMR Resources. \n
\n " + "documentation": "\nA user-defined value, which is optional in a tag. \n For more information, see Tagging Amazon EMR Resources. \n
\n " } }, - "documentation": "\nA key/value pair that contains user-defined metadata that you can associate with an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. \n For more information, see Tagging Amazon EMR Resources. \n
\n " + "documentation": "\nA key/value pair containing user-defined metadata that you can associate with an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clu\\\nsters to track your Amazon EMR resource allocation costs. \n For more information, see Tagging Amazon EMR Resources. \n
\n " }, - "documentation": "\nA list of tags to associate with a cluster and propagate to Amazon EC2 instances. Tags are user-defined key/value pairs that consist of a required key string with a maximum of 128 characters, and an optional value string with a maximum of 256 characters.
\n " + "documentation": "\nA list of tags to associate with a cluster and propagate to Amazon EC2 instances. Tags are user-defined key/value pairs that consist of a required key string with a maximum of 128 characters, and an optional value string with a maximum of 256 characters.
\n ", + "required": true } }, - "documentation": "\nThis input identifies a cluster and a list of tags to attach.\n
\n " + "documentation": "\nThis input identifies a cluster and a list of tags to attach. \n
\n " }, "output": { "shape_name": "AddTagsOutput", "type": "structure", "members": { }, - "documentation": "\nThis output indicates the result of adding tags to a resource. \n
\n " + "documentation": "\nThis output indicates the result of adding tags to a resource. \n
\n " }, "errors": [ { @@ -330,10 +333,10 @@ "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\nThe message associated with the exception.
\n \n " + "documentation": "\nThe message associated with the exception.
\n\n " } }, - "documentation": "\nThis exception occurs when there is an internal failure in the EMR service.
\n \n " + "documentation": "\nThis exception occurs when there is an internal failure in the EMR service.
\n\n " }, { "shape_name": "InvalidRequestException", @@ -344,18 +347,18 @@ "type": "string", "min_length": 1, "max_length": 256, - "documentation": "\nThe error code associated with the exception.
\n \n " + "documentation": "\nThe error code associated with the exception.
\n\n " }, "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\nThe message associated with the exception.
\n \n " + "documentation": "\nThe message associated with the exception.
\n\n " } }, - "documentation": "\nThis exception occurs when there is something wrong with user input.
\n \n " + "documentation": "\nThis exception occurs when there is something wrong with user input.
\n\n " } ], - "documentation": "\nAdds tags to an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. \n For more information, see Tagging Amazon EMR Resources. \n
\n " + "documentation": "\nAdds tags to an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. \n For more information, see Tagging Amazon EMR Resources. \n
\nThe identifier of the cluster to describe.
\n " + "documentation": "\nThe identifier of the cluster to describe.
\n ", + "required": true } }, "documentation": "\nThis input determines which cluster to describe.
\n " @@ -483,7 +487,7 @@ "documentation": "\nThe IAM role that was specified when the job flow was launched. The EC2 instances of the job flow assume this role.
\n " } }, - "documentation": "\nProvides information about the EC2 instances in a cluster grouped by category. For example, EC2 Key Name, Subnet Id, Instance Profile, and so on.
\n " + "documentation": "\nProvides information about the EC2 instances in a cluster grouped by category. For example, key name, subnet ID, IAM instance profile, and so on.
\n " }, "LogUri": { "shape_name": "String", @@ -558,7 +562,7 @@ "documentation": "\nThis option is for advanced users only. This is meta information about third-party applications that third-party vendors use for testing purposes.
" } }, - "documentation": "\nAn application is any Amazon or third-party software that you can add to the cluster. This structure contains a list of strings that indicates the software to use with the cluster and accepts a user argument list. Amazon EMR accepts and forwards the argument list to the corresponding installation\n script as bootstrap action argument. For more information, see Launch a Job Flow on the MapR Distribution for Hadoop. Currently supported values are:
\nAn application is any Amazon or third-party software that you can add to the cluster. This structure contains a list of strings that indicates the software to use with the cluster and accepts a user argument list. Amazon EMR accepts and forwards the argument list to the corresponding installation\n script as bootstrap action argument. For more information, see Launch a Job Flow on the MapR Distribution for Hadoop. Currently supported values are:
\nThe applications installed on this cluster.
\n " }, @@ -572,17 +576,22 @@ "Key": { "shape_name": "String", "type": "string", - "documentation": "\nA user-defined key, which is the minimum required information for a valid tag.\n For more information, see Tagging Amazon EMR Resources. \n
\n " + "documentation": "\nA user-defined key, which is the minimum required information for a valid tag. \n For more information, see Tagging Amazon EMR Resources. \n
\n " }, "Value": { "shape_name": "String", "type": "string", - "documentation": "\nA user-defined value, which is optional in a tag.\n For more information, see Tagging Amazon EMR Resources. \n
\n " + "documentation": "\nA user-defined value, which is optional in a tag. \n For more information, see Tagging Amazon EMR Resources. \n
\n " } }, - "documentation": "\nA key/value pair that contains user-defined metadata that you can associate with an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. \n For more information, see Tagging Amazon EMR Resources. \n
\n " + "documentation": "\nA key/value pair containing user-defined metadata that you can associate with an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clu\\\nsters to track your Amazon EMR resource allocation costs. \n For more information, see Tagging Amazon EMR Resources. \n
\n " }, - "documentation": "\nA list of tags associated with cluster.
" + "documentation": "\nA list of tags associated with a cluster.
" + }, + "ServiceRole": { + "shape_name": "String", + "type": "string", + "documentation": "\nThe IAM role that was specified when the job flow was launched. Amazon ElasticMapReduce will assume this role to work with AWS resources on your behalf.
\n " } }, "documentation": "\nThis output contains the details for the requested cluster.
\n " @@ -598,10 +607,10 @@ "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\nThe message associated with the exception.
\n \n " + "documentation": "\nThe message associated with the exception.
\n\n " } }, - "documentation": "\nThis exception occurs when there is an internal failure in the EMR service.
\n \n " + "documentation": "\nThis exception occurs when there is an internal failure in the EMR service.
\n\n " }, { "shape_name": "InvalidRequestException", @@ -612,15 +621,15 @@ "type": "string", "min_length": 1, "max_length": 256, - "documentation": "\nThe error code associated with the exception.
\n \n " + "documentation": "\nThe error code associated with the exception.
\n\n " }, "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\nThe message associated with the exception.
\n \n " + "documentation": "\nThe message associated with the exception.
\n\n " } }, - "documentation": "\nThis exception occurs when there is something wrong with user input.
\n \n " + "documentation": "\nThis exception occurs when there is something wrong with user input.
\n\n " } ], "documentation": "\nProvides cluster-level details including status, hardware and software configuration, VPC settings, and so on. For information about the cluster steps, see ListSteps.
\n \n " @@ -1239,6 +1248,14 @@ "min_length": 0, "max_length": 10280, "documentation": "\nThe IAM role that was specified when the job flow was launched. The EC2 instances of the job flow assume this role.
\n " + }, + "ServiceRole": { + "shape_name": "XmlString", + "type": "string", + "pattern": "[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*", + "min_length": 0, + "max_length": 10280, + "documentation": "\nThe IAM role that was specified when the job flow was launched. Amazon ElasticMapReduce will assume this role to work with AWS resources on your behalf.
\n " } }, "documentation": "\nA description of a job flow.
\n " @@ -1257,7 +1274,7 @@ "documentation": "\nIndicates that an error occurred while processing the request and that the request was not\n completed.
\n " } ], - "documentation": "\nDescribeJobFlows returns a list of job flows that match all of the supplied parameters.\n The parameters can include a list of job flow IDs, job flow states, and restrictions on job\n flow creation date and time.
\nRegardless of supplied parameters, only job flows created within the last two months are\n returned.
\nIf no parameters are supplied, then job flows matching either of the following criteria\n are returned:
\nRUNNING
, WAITING
, SHUTTING_DOWN
,\n STARTING
\n Amazon Elastic MapReduce can return a maximum of 512 job flow descriptions.
\nThis API is deprecated and will eventually be removed. We recommend you use ListClusters,\n DescribeCluster, ListSteps, ListInstanceGroups and ListBootstrapActions\n instead.
\nDescribeJobFlows returns a list of job flows that match all of the supplied parameters.\n The parameters can include a list of job flow IDs, job flow states, and restrictions on job\n flow creation date and time.
\nRegardless of supplied parameters, only job flows created within the last two months are\n returned.
\nIf no parameters are supplied, then job flows matching either of the following criteria\n are returned:
\nRUNNING
, WAITING
, SHUTTING_DOWN
,\n STARTING
\n Amazon Elastic MapReduce can return a maximum of 512 job flow descriptions.
\nThe identifier of the cluster with steps to describe.
\n " + "documentation": "\nThe identifier of the cluster with steps to describe.
\n ", + "required": true }, "StepId": { "shape_name": "StepId", "type": "string", - "documentation": "\nThe identifier of the step to describe.
\n " + "documentation": "\nThe identifier of the step to describe.
\n ", + "required": true } }, "documentation": "\nThis input determines which step to describe.
\n " @@ -1347,7 +1366,7 @@ "CANCEL_AND_WAIT", "CONTINUE" ], - "documentation": "\nThis specifies what action to take when the cluster step fails. TERMINATE_JOB_FLOW is deprecated, use TERMINATE_CLUSTER instead.\n
\n " + "documentation": "\nThis specifies what action to take when the cluster step fails. Possible values are TERMINATE_CLUSTER, CANCEL_AND_WAIT, and CONTINUE.\n
\n " }, "Status": { "shape_name": "StepStatus", @@ -1398,12 +1417,12 @@ "StartDateTime": { "shape_name": "Date", "type": "timestamp", - "documentation": "\nThe date and time when the cluster step execution started.\n Due to delays in step status reporting, this can display a time which pre-dates a previous call to DescribeStep that indicated the step was not yet running. \n
\n " + "documentation": "\nThe date and time when the cluster step execution started.\n
\n " }, "EndDateTime": { "shape_name": "Date", "type": "timestamp", - "documentation": "\nThe date and time when the cluster step execution completed or failed. \n This can display a time that pre-dates a call to DescribeStep that indicates the step is running, due to delays in step status reporting. \n
\n " + "documentation": "\nThe date and time when the cluster step execution completed or failed.\n
\n " } }, "documentation": "\nThe timeline of the cluster step status over time.\n
\n " @@ -1425,10 +1444,10 @@ "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\nThe message associated with the exception.
\n \n " + "documentation": "\nThe message associated with the exception.
\n\n " } }, - "documentation": "\nThis exception occurs when there is an internal failure in the EMR service.
\n \n " + "documentation": "\nThis exception occurs when there is an internal failure in the EMR service.
\n\n " }, { "shape_name": "InvalidRequestException", @@ -1439,15 +1458,15 @@ "type": "string", "min_length": 1, "max_length": 256, - "documentation": "\nThe error code associated with the exception.
\n \n " + "documentation": "\nThe error code associated with the exception.
\n\n " }, "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\nThe message associated with the exception.
\n \n " + "documentation": "\nThe message associated with the exception.
\n\n " } }, - "documentation": "\nThis exception occurs when there is something wrong with user input.
\n \n " + "documentation": "\nThis exception occurs when there is something wrong with user input.
\n\n " } ], "documentation": "\nProvides more detail about the cluster step.
\n " @@ -1461,12 +1480,13 @@ "ClusterId": { "shape_name": "ClusterId", "type": "string", - "documentation": "\nThe cluster identifier for the bootstrap actions to list.
\n " + "documentation": "\nThe cluster identifier for the bootstrap actions to list\n .
\n ", + "required": true }, "Marker": { "shape_name": "Marker", "type": "string", - "documentation": "\nThe pagination token is a random string indicating whether there are more results to fetch. Provide the pagination token from earlier API calls to retrieve the next page of results. When the value is null, all results have been returned.
\n " + "documentation": "\nThe pagination token that indicates the next set of results to retrieve\n .
\n " } }, "documentation": "\nThis input determines which bootstrap actions to retrieve.
\n " @@ -1505,15 +1525,15 @@ }, "documentation": "\nAn entity describing an executable that runs on a cluster.
\n " }, - "documentation": "\nThe bootstrap actions associated with the cluster.
\n " + "documentation": "\nThe bootstrap actions associated with the cluster\n .
\n " }, "Marker": { "shape_name": "Marker", "type": "string", - "documentation": "\nThe pagination token is a random string indicating whether there are more results to fetch. Use the pagination token in later API calls to retrieve the next page of results. When the value is null, all results have been returned.
\n " + "documentation": "\nThe pagination token that indicates the next set of results to retrieve\n .
\n " } }, - "documentation": "\nThis output contains the bootstrap actions detail.
\n " + "documentation": "\nThis output contains the boostrap actions detail\n .
\n " }, "errors": [ { @@ -1523,10 +1543,10 @@ "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\nThe message associated with the exception.
\n \n " + "documentation": "\nThe message associated with the exception.
\n\n " } }, - "documentation": "\nThis exception occurs when there is an internal failure in the EMR service.
\n \n " + "documentation": "\nThis exception occurs when there is an internal failure in the EMR service.
\n\n " }, { "shape_name": "InvalidRequestException", @@ -1537,15 +1557,15 @@ "type": "string", "min_length": 1, "max_length": 256, - "documentation": "\nThe error code associated with the exception.
\n \n " + "documentation": "\nThe error code associated with the exception.
\n\n " }, "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\nThe message associated with the exception.
\n \n " + "documentation": "\nThe message associated with the exception.
\n\n " } }, - "documentation": "\nThis exception occurs when there is something wrong with user input.
\n \n " + "documentation": "\nThis exception occurs when there is something wrong with user input.
\n\n " } ], "documentation": "\nProvides information about the bootstrap actions associated with a cluster.
\n \n " @@ -1559,12 +1579,12 @@ "CreatedAfter": { "shape_name": "Date", "type": "timestamp", - "documentation": "\nThe creation date and time beginning value filter for listing clusters.
\n " + "documentation": "\nThe creation date and time beginning value filter for listing clusters\n .
\n " }, "CreatedBefore": { "shape_name": "Date", "type": "timestamp", - "documentation": "\nThe creation date and time end value filter for listing clusters.
\n " + "documentation": "\nThe creation date and time end value filter for listing clusters\n .
\n " }, "ClusterStates": { "shape_name": "ClusterStateList", @@ -1588,7 +1608,7 @@ "Marker": { "shape_name": "Marker", "type": "string", - "documentation": "\nThe pagination token is a random string indicating whether there are more results to fetch. Provide the pagination token from earlier API calls to retrieve the next page of results. When the value is null, all results have been returned.\n
\n " + "documentation": "\nThe pagination token that indicates the next set of results to retrieve.\n
\n " } }, "documentation": "\nThis input determines how the ListClusters action filters the list of clusters that it returns.
\n " @@ -1691,7 +1711,7 @@ "Marker": { "shape_name": "Marker", "type": "string", - "documentation": "\nThe pagination token is a random string indicating whether there are more results to fetch. Use the pagination token in later API calls to retrieve the next page of results. When the value is null, all results have been returned. \n
\n " + "documentation": "\nThe pagination token that indicates the next set of results to retrieve.\n
\n " } }, "documentation": "\nThis contains a ClusterSummaryList with the cluster details; for example, the cluster IDs, names, and status.
\n " @@ -1704,10 +1724,10 @@ "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\nThe message associated with the exception.
\n \n " + "documentation": "\nThe message associated with the exception.
\n\n " } }, - "documentation": "\nThis exception occurs when there is an internal failure in the EMR service.
\n \n " + "documentation": "\nThis exception occurs when there is an internal failure in the EMR service.
\n\n " }, { "shape_name": "InvalidRequestException", @@ -1718,15 +1738,15 @@ "type": "string", "min_length": 1, "max_length": 256, - "documentation": "\nThe error code associated with the exception.
\n \n " + "documentation": "\nThe error code associated with the exception.
\n\n " }, "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\nThe message associated with the exception.
\n \n " + "documentation": "\nThe message associated with the exception.
\n\n " } }, - "documentation": "\nThis exception occurs when there is something wrong with user input.
\n \n " + "documentation": "\nThis exception occurs when there is something wrong with user input.
\n\n " } ], "documentation": "\nProvides the status of all clusters visible to this AWS account. Allows you to filter the list of clusters based on certain criteria; for example, filtering by cluster creation date and time or by status.\n This call returns a maximum of 50 clusters per call, but returns a marker to track the paging of the cluster list across multiple ListClusters calls. \n
\n \n " @@ -1740,12 +1760,13 @@ "ClusterId": { "shape_name": "ClusterId", "type": "string", - "documentation": "\nThe identifier of the cluster for which to list the instance groups.
\n " + "documentation": "\nThe identifier of the cluster for which to list the instance groups.
\n ", + "required": true }, "Marker": { "shape_name": "Marker", "type": "string", - "documentation": "\nThe pagination token is a random string indicating whether there are more results to fetch. Provide the pagination token from earlier API calls to retrieve the next page of results. When the value is null, all results have been returned.
\n " + "documentation": "\nThe pagination token that indicates the next set of results to retrieve.
\n " } }, "documentation": "\nThis input determines which instance groups to retrieve.
\n " @@ -1832,7 +1853,7 @@ "SHUTTING_DOWN", "ENDED" ], - "documentation": "\nThe current state of the instance group. The following values are deprecated: ARRESTED, SHUTTING_DOWN, and ENDED. Use SUSPENDED, TERMINATING, and TERMINATED instead, respectively.\n
\n " + "documentation": "\nThe current state of the instance group.
\n " }, "StateChangeReason": { "shape_name": "InstanceGroupStateChangeReason", @@ -1890,7 +1911,7 @@ "Marker": { "shape_name": "Marker", "type": "string", - "documentation": "\nThe pagination token is a random string indicating whether there are more results to fetch. Use the pagination token in later API calls to retrieve the next page of results. When the value is null, all results have been returned.
\n " + "documentation": "\nThe pagination token that indicates the next set of results to retrieve.
\n " } }, "documentation": "\nThis input determines which instance groups to retrieve.
\n " @@ -1903,10 +1924,10 @@ "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\nThe message associated with the exception.
\n \n " + "documentation": "\nThe message associated with the exception.
\n\n " } }, - "documentation": "\nThis exception occurs when there is an internal failure in the EMR service.
\n \n " + "documentation": "\nThis exception occurs when there is an internal failure in the EMR service.
\n\n " }, { "shape_name": "InvalidRequestException", @@ -1917,15 +1938,15 @@ "type": "string", "min_length": 1, "max_length": 256, - "documentation": "\nThe error code associated with the exception.
\n \n " + "documentation": "\nThe error code associated with the exception.
\n\n " }, "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\nThe message associated with the exception.
\n \n " + "documentation": "\nThe message associated with the exception.
\n\n " } }, - "documentation": "\nThis exception occurs when there is something wrong with user input.
\n \n " + "documentation": "\nThis exception occurs when there is something wrong with user input.
\n\n " } ], "documentation": "\nProvides all available details about the instance groups in a cluster.
\n \n " @@ -1939,7 +1960,8 @@ "ClusterId": { "shape_name": "ClusterId", "type": "string", - "documentation": "\nThe identifier of the cluster for which to list the instances.
\n " + "documentation": "\nThe identifier of the cluster for which to list the instances.
\n ", + "required": true }, "InstanceGroupId": { "shape_name": "InstanceGroupId", @@ -1964,7 +1986,7 @@ "Marker": { "shape_name": "Marker", "type": "string", - "documentation": "\nThe pagination token is a random string indicating whether there are more results to fetch. Provide the pagination token from earlier API calls to retrieve the next page of results. When the value is null, all results have been returned.
\n " + "documentation": "\nThe pagination token that indicates the next set of results to retrieve.
\n " } }, "documentation": "\nThis input determines which instances to list.
\n " @@ -2083,7 +2105,7 @@ "Marker": { "shape_name": "Marker", "type": "string", - "documentation": "\nThe pagination token is a random string indicating whether there are more results to fetch. Use the pagination token in later API calls to retrieve the next page of results. When the value is null, all results have been returned.
\n " + "documentation": "\nThe pagination token that indicates the next set of results to retrieve.
\n " } }, "documentation": "\nThis output contains the list of instances.
\n " @@ -2096,10 +2118,10 @@ "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\nThe message associated with the exception.
\n \n " + "documentation": "\nThe message associated with the exception.
\n\n " } }, - "documentation": "\nThis exception occurs when there is an internal failure in the EMR service.
\n \n " + "documentation": "\nThis exception occurs when there is an internal failure in the EMR service.
\n\n " }, { "shape_name": "InvalidRequestException", @@ -2110,15 +2132,15 @@ "type": "string", "min_length": 1, "max_length": 256, - "documentation": "\nThe error code associated with the exception.
\n \n " + "documentation": "\nThe error code associated with the exception.
\n\n " }, "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\nThe message associated with the exception.
\n \n " + "documentation": "\nThe message associated with the exception.
\n\n " } }, - "documentation": "\nThis exception occurs when there is something wrong with user input.
\n \n " + "documentation": "\nThis exception occurs when there is something wrong with user input.
\n\n " } ], "documentation": "\nProvides information about the cluster instances that Amazon EMR provisions on behalf of a user when it creates the cluster. \n For example, this operation indicates when the EC2 instances reach the Ready state, when instances become available to Amazon EMR to use for jobs, and the IP addresses for cluster instances, etc.\n
\n " @@ -2132,7 +2154,8 @@ "ClusterId": { "shape_name": "ClusterId", "type": "string", - "documentation": "\nThe identifier of the cluster for which to list the steps.
\n " + "documentation": "\nThe identifier of the cluster for which to list the steps.
\n ", + "required": true }, "StepStates": { "shape_name": "StepStateList", @@ -2155,7 +2178,7 @@ "Marker": { "shape_name": "Marker", "type": "string", - "documentation": "\nThe pagination token is a random string indicating whether there are more results to fetch. Provide the pagination token from earlier API calls to retrieve the next page of results. When the value is null, all results have been returned.
\n " + "documentation": "\nThe pagination token that indicates the next set of results to retrieve.
\n " } }, "documentation": "\nThis input determines which steps to list.
\n " @@ -2230,12 +2253,12 @@ "StartDateTime": { "shape_name": "Date", "type": "timestamp", - "documentation": "\nThe date and time when the cluster step execution started.\n Due to delays in step status reporting, this can display a time which pre-dates a previous call to DescribeStep that indicated the step was not yet running. \n
\n " + "documentation": "\nThe date and time when the cluster step execution started.\n
\n " }, "EndDateTime": { "shape_name": "Date", "type": "timestamp", - "documentation": "\nThe date and time when the cluster step execution completed or failed. \n This can display a time that pre-dates a call to DescribeStep that indicates the step is running, due to delays in step status reporting. \n
\n " + "documentation": "\nThe date and time when the cluster step execution completed or failed.\n
\n " } }, "documentation": "\nThe timeline of the cluster step status over time.\n
\n " @@ -2251,7 +2274,7 @@ "Marker": { "shape_name": "Marker", "type": "string", - "documentation": "\nThe pagination token is a random string indicating whether there are more results to fetch. Use the pagination token in later API calls to retrieve the next page of results. When the value is null, all results have been returned.
\n " + "documentation": "\nThe pagination token that indicates the next set of results to retrieve.
\n " } }, "documentation": "\nThis output contains the list of steps.
\n " @@ -2264,10 +2287,10 @@ "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\nThe message associated with the exception.
\n \n " + "documentation": "\nThe message associated with the exception.
\n\n " } }, - "documentation": "\nThis exception occurs when there is an internal failure in the EMR service.
\n \n " + "documentation": "\nThis exception occurs when there is an internal failure in the EMR service.
\n\n " }, { "shape_name": "InvalidRequestException", @@ -2278,15 +2301,15 @@ "type": "string", "min_length": 1, "max_length": 256, - "documentation": "\nThe error code associated with the exception.
\n \n " + "documentation": "\nThe error code associated with the exception.
\n\n " }, "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\nThe message associated with the exception.
\n \n " + "documentation": "\nThe message associated with the exception.
\n\n " } }, - "documentation": "\nThis exception occurs when there is something wrong with user input.
\n \n " + "documentation": "\nThis exception occurs when there is something wrong with user input.
\n\n " } ], "documentation": "\nProvides a list of steps for the cluster. \n
\n " @@ -2326,7 +2349,7 @@ "type": "string", "documentation": null }, - "documentation": "\nThe EC2 InstanceIds to terminate. For advanced users only. \n Once you terminate the instances, the instance group will not return to its original requested size.
\n " + "documentation": "\nThe EC2 InstanceIds to terminate. For advanced users only. \n Once you terminate the instances, the instance group will not return to its original requested size.
\n " } }, "documentation": "\nModify an instance group size.
\n " @@ -2357,7 +2380,8 @@ "ResourceId": { "shape_name": "ResourceId", "type": "string", - "documentation": "\nThe Amazon EMR resource identifier from which tags will be removed. This value must be a cluster identifier.
\n " + "documentation": "\nThe Amazon EMR resource identifier from which tags will be removed. This value must be a cluster identifier.
\n ", + "required": true }, "TagKeys": { "shape_name": "StringList", @@ -2367,17 +2391,18 @@ "type": "string", "documentation": null }, - "documentation": "\nA list of tag keys to remove from a resource.
\n " + "documentation": "\nA list of tag keys to remove from a resource.
\n ", + "required": true } }, - "documentation": "\nThis input identifies a cluster and a list of tags to remove. \n
\n " + "documentation": "\nThis input identifies a cluster and a list of tags to remove. \n
\n " }, "output": { "shape_name": "RemoveTagsOutput", "type": "structure", "members": { }, - "documentation": "\nThis output indicates the result of removing tags from a resource. \n
\n " + "documentation": "\nThis output indicates the result of removing tags from a resource. \n
\n " }, "errors": [ { @@ -2387,10 +2412,10 @@ "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\nThe message associated with the exception.
\n \n " + "documentation": "\nThe message associated with the exception.
\n\n " } }, - "documentation": "\nThis exception occurs when there is an internal failure in the EMR service.
\n \n " + "documentation": "\nThis exception occurs when there is an internal failure in the EMR service.
\n\n " }, { "shape_name": "InvalidRequestException", @@ -2401,18 +2426,18 @@ "type": "string", "min_length": 1, "max_length": 256, - "documentation": "\nThe error code associated with the exception.
\n \n " + "documentation": "\nThe error code associated with the exception.
\n\n " }, "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\nThe message associated with the exception.
\n \n " + "documentation": "\nThe message associated with the exception.
\n\n " } }, - "documentation": "\nThis exception occurs when there is something wrong with user input.
\n \n " + "documentation": "\nThis exception occurs when there is something wrong with user input.
\n\n " } ], - "documentation": "\nRemoves tags from an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. \n For more information, see Tagging Amazon EMR Resources. \n
\n \n " + "documentation": "\nRemoves tags from an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. \n For more information, see Tagging Amazon EMR Resources. \n
\nThe following example removes the stack tag with value Prod from a cluster:
\nThe following example removes the stack and hbase tags from a cluster:
\nAn IAM role for the job flow. The EC2 instances of the job flow assume this role. The default role is EMRJobflowDefault
. In order to use the default role, you must have already created it using the CLI.
IAM role that Amazon ElasticMapReduce will assume to work with AWS resources on your behalf. You may set this parameter to the name of an existing IAM role.
\n " + }, "Tags": { "shape_name": "TagList", "type": "list", @@ -2813,15 +2846,15 @@ "Key": { "shape_name": "String", "type": "string", - "documentation": "\nA user-defined key, which is the minimum required information for a valid tag.\n For more information, see Tagging Amazon EMR Resources. \n
\n " + "documentation": "\nA user-defined key, which is the minimum required information for a valid tag. \n For more information, see Tagging Amazon EMR Resources. \n
\n " }, "Value": { "shape_name": "String", "type": "string", - "documentation": "\nA user-defined value, which is optional in a tag.\n For more information, see Tagging Amazon EMR Resources. \n
\n " + "documentation": "\nA user-defined value, which is optional in a tag. \n For more information, see Tagging Amazon EMR Resources. \n
\n " } }, - "documentation": "\nA key/value pair that contains user-defined metadata that you can associate with an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. \n For more information, see Tagging Amazon EMR Resources. \n
\n " + "documentation": "\nA key/value pair containing user-defined metadata that you can associate with an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clu\\\nsters to track your Amazon EMR resource allocation costs. \n For more information, see Tagging Amazon EMR Resources. \n
\n " }, "documentation": "\nA list of tags to associate with a cluster and propagate to Amazon EC2 instances.
\n " } @@ -2972,4 +3005,4 @@ "documentation": "\n\n TerminateJobFlows shuts a list of job flows down. When a job flow is shut down, any step\n not yet completed is canceled and the EC2 instances on which the job flow is running are\n stopped. Any log files not already saved are uploaded to Amazon S3 if a LogUri was\n specified when the job flow was created. \n
\n\n The call to TerminateJobFlows is asynchronous. Depending on the configuration of the job flow, \n it may take up to 5-20 minutes for the job flow to \n completely terminate and release allocated resources, such as Amazon EC2 instances.\n
\n \n