diff --git a/botocore/__init__.py b/botocore/__init__.py index b1af245614..9e7a3bd45c 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import re import logging -__version__ = '0.51.0' +__version__ = '0.52.0' class NullHandler(logging.Handler): diff --git a/botocore/data/aws/elastictranscoder/2012-09-25.json b/botocore/data/aws/elastictranscoder/2012-09-25.json index 174be09446..bff8aedd60 100644 --- a/botocore/data/aws/elastictranscoder/2012-09-25.json +++ b/botocore/data/aws/elastictranscoder/2012-09-25.json @@ -23,8 +23,8 @@ "type": "string", "pattern": "^\\d{13}-\\w{6}$", "documentation": "\n

The identifier of the job that you want to cancel.

\n

To get a list of the jobs (including their jobId) that have a status of\n Submitted, use the ListJobsByStatus API action.

\n ", - "location": "uri", - "required": true + "required": true, + "location": "uri" } }, "documentation": " \n

The CancelJobRequest structure.

\n " @@ -90,7 +90,8 @@ "shape_name": "Id", "type": "string", "pattern": "^\\d{13}-\\w{6}$", - "documentation": "\n

The Id of the pipeline that you want Elastic Transcoder to use for\n transcoding. The pipeline determines several settings, including the Amazon S3 bucket\n from which Elastic Transcoder gets the files to transcode and the bucket into which\n Elastic Transcoder puts the transcoded files.

\n " + "documentation": "\n

The Id of the pipeline that you want Elastic Transcoder to use for\n transcoding. The pipeline determines several settings, including the Amazon S3 bucket\n from which Elastic Transcoder gets the files to transcode and the bucket into which\n Elastic Transcoder puts the transcoded files.

\n ", + "required": true }, "Input": { "shape_name": "JobInput", @@ -134,7 +135,8 @@ "documentation": "\n

The container type for the input file. If you want Elastic Transcoder to automatically detect the\n container type of the input file, specify auto. If you want to specify the\n container type for the input file, enter one of the following values:

\n

\n 3gp, aac, asf, avi, \n divx, flv, m4a, mkv, \n mov, mp3, mp4, mpeg, \n mpeg-ps, mpeg-ts, mxf, ogg, \n vob, wav, webm\n

\n " } }, - "documentation": "\n

A section of the request body that provides information about the file that is being\n transcoded.

\n " + "documentation": "\n

A section of the request body that provides information about the file that is being\n transcoded.

\n ", + "required": true }, "Output": { "shape_name": "CreateJobOutput", @@ -282,7 +284,7 @@ "shape_name": "Time", "type": "string", "pattern": "(^\\d{1,5}(\\.\\d{0,3})?$)|(^([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", - "documentation": "\n

The duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.

\n

If you specify a value longer than the duration of the input file , Elastic Transcoder transcodes \n the file and returns a warning message.

\n " + "documentation": "\n

The duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.

\n

If you specify a value longer than the duration of the input file, Elastic Transcoder transcodes \n the file and returns a warning message.

\n " } }, "documentation": "\n

Settings that determine when a clip begins and how long it lasts.

\n " @@ -290,7 +292,85 @@ }, "documentation": "\n

Settings for one clip in a composition. All jobs in a playlist must have the same clip settings.

\n " }, - "documentation": "\n

You can create an output file that contains an excerpt from the input file. This excerpt, called a clip, can come from the beginning, middle, or end of the file. The Composition object contains settings for the clips that make up an output file. For the current release, you can only specify settings for a single clip per output file. The Composition object cannot be null.

\n " + "documentation": "\n

You can create an output file that contains an excerpt from the input file. This \n excerpt, called a clip, can come from the beginning, middle, or end of the file. \n The Composition object contains settings for the clips that make up an output file. \n For the current release, you can only specify settings for a single clip per output \n file. The Composition object cannot be null.

\n " + }, + "Captions": { + "shape_name": "Captions", + "type": "structure", + "members": { + "MergePolicy": { + "shape_name": "CaptionMergePolicy", + "type": "string", + "pattern": "(^MergeOverride$)|(^MergeRetain$)|(^Override$)", + "documentation": "\n

A policy that determines how Elastic Transcoder handles the existence of multiple captions.

\n \n

MergePolicy cannot be null.

\n " + }, + "CaptionSources": { + "shape_name": "CaptionSources", + "type": "list", + "members": { + "shape_name": "CaptionSource", + "type": "structure", + "members": { + "Key": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\n

The name of the sidecar caption file that you want Elastic Transcoder to include in the\n output file.

\n " + }, + "Language": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\n

A string that specifies the language of the caption. Specify this as one of:

\n \n

For more information on ISO language codes and language names, see the List of ISO \n 639-1 codes.

\n " + }, + "TimeOffset": { + "shape_name": "TimeOffset", + "type": "string", + "pattern": "(^[+-]?\\d{1,5}(\\.\\d{0,3})?$)|(^[+-]?([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", + "documentation": "\n

For clip generation or captions that do not start at the same time\n as the associated video file, the TimeOffset tells Elastic Transcoder how much of the video to encode\n before including captions.

\n

Specify the TimeOffset in the form [+-]SS.sss or [+-]HH:mm:SS.ss.

\n " + }, + "Label": { + "shape_name": "Name", + "type": "string", + "min_length": 1, + "max_length": 40, + "documentation": "\n

The label of the caption shown in the player when choosing\n a language. We recommend that you put the caption language name here, \n in the language of the captions.

\n " + } + }, + "documentation": "\n

A source file for the input sidecar captions used during the transcoding\n process.

\n " + }, + "max_length": 20, + "documentation": "\n

Source files for the input sidecar captions used during the transcoding\n process. To omit all sidecar captions, leave CaptionSources blank.

\n " + }, + "CaptionFormats": { + "shape_name": "CaptionFormats", + "type": "list", + "members": { + "shape_name": "CaptionFormat", + "type": "structure", + "members": { + "Format": { + "shape_name": "CaptionFormatFormat", + "type": "string", + "pattern": "(^mov-text$)|(^cea-608$)|(^cea-708$)|(^srt$)|(^scc$)|(^webvtt$)|(^dfxp$)", + "documentation": "\n

The format you specify determines whether Elastic Transcoder generates an\n embedded or sidecar caption for this output.

\n \n " + }, + "Pattern": { + "shape_name": "CaptionFormatPattern", + "type": "string", + "pattern": "(^$)|(^.*\\{language\\}.*$)", + "documentation": "\n

The prefix for caption filenames, in the form description-{language}, where:

\n \n

If you don't include {language} in the file name pattern, Elastic Transcoder automatically\n appends \"{language}\" to the value that you specify for the description. In addition,\n Elastic Transcoder automatically appends the count to the end of the segment files.

\n

For example, suppose you're transcoding into srt format. When you enter\n \"Sydney-{language}-sunrise\", and the language of the captions is English\n (en), the name of the first caption file will be Sydney-en-sunrise00000.srt.

\n " + } + }, + "documentation": "\n

The file format of the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.

\n " + }, + "max_length": 4, + "documentation": "\n

The array of file formats for the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.

\n " + } + }, + "documentation": "\n

You can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another.\n All captions must be in UTF-8. Elastic Transcoder supports two types of captions:

\n \n

If you want ttml or smpte-tt compatible captions, specify dfxp as your output format.

\n

Elastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures\n as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does\n not preserve text formatting (for example, italics) during the transcoding process.

\n

To remove captions or leave the captions empty, set Captions to null. To pass through\n existing captions unchanged, set the MergePolicy to MergeRetain,\n and pass in a null CaptionSources array.

\n

For more information on embedded files, see the Subtitles Wikipedia page.

\n

For more information on sidecar files, see the Extensible Metadata Platform and Sidecar file\n Wikipedia pages.

\n " } }, "documentation": "\n

The CreateJobOutput structure.

\n " @@ -444,7 +524,7 @@ "shape_name": "Time", "type": "string", "pattern": "(^\\d{1,5}(\\.\\d{0,3})?$)|(^([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", - "documentation": "\n

The duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.

\n

If you specify a value longer than the duration of the input file , Elastic Transcoder transcodes \n the file and returns a warning message.

\n " + "documentation": "\n

The duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.

\n

If you specify a value longer than the duration of the input file, Elastic Transcoder transcodes \n the file and returns a warning message.

\n " } }, "documentation": "\n

Settings that determine when a clip begins and how long it lasts.

\n " @@ -452,7 +532,85 @@ }, "documentation": "\n

Settings for one clip in a composition. All jobs in a playlist must have the same clip settings.

\n " }, - "documentation": "\n

You can create an output file that contains an excerpt from the input file. This excerpt, called a clip, can come from the beginning, middle, or end of the file. The Composition object contains settings for the clips that make up an output file. For the current release, you can only specify settings for a single clip per output file. The Composition object cannot be null.

\n " + "documentation": "\n

You can create an output file that contains an excerpt from the input file. This \n excerpt, called a clip, can come from the beginning, middle, or end of the file. \n The Composition object contains settings for the clips that make up an output file. \n For the current release, you can only specify settings for a single clip per output \n file. The Composition object cannot be null.

\n " + }, + "Captions": { + "shape_name": "Captions", + "type": "structure", + "members": { + "MergePolicy": { + "shape_name": "CaptionMergePolicy", + "type": "string", + "pattern": "(^MergeOverride$)|(^MergeRetain$)|(^Override$)", + "documentation": "\n

A policy that determines how Elastic Transcoder handles the existence of multiple captions.

\n \n

MergePolicy cannot be null.

\n " + }, + "CaptionSources": { + "shape_name": "CaptionSources", + "type": "list", + "members": { + "shape_name": "CaptionSource", + "type": "structure", + "members": { + "Key": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\n

The name of the sidecar caption file that you want Elastic Transcoder to include in the\n output file.

\n " + }, + "Language": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\n

A string that specifies the language of the caption. Specify this as one of:

\n \n

For more information on ISO language codes and language names, see the List of ISO \n 639-1 codes.

\n " + }, + "TimeOffset": { + "shape_name": "TimeOffset", + "type": "string", + "pattern": "(^[+-]?\\d{1,5}(\\.\\d{0,3})?$)|(^[+-]?([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", + "documentation": "\n

For clip generation or captions that do not start at the same time\n as the associated video file, the TimeOffset tells Elastic Transcoder how much of the video to encode\n before including captions.

\n

Specify the TimeOffset in the form [+-]SS.sss or [+-]HH:mm:SS.ss.

\n " + }, + "Label": { + "shape_name": "Name", + "type": "string", + "min_length": 1, + "max_length": 40, + "documentation": "\n

The label of the caption shown in the player when choosing\n a language. We recommend that you put the caption language name here, \n in the language of the captions.

\n " + } + }, + "documentation": "\n

A source file for the input sidecar captions used during the transcoding\n process.

\n " + }, + "max_length": 20, + "documentation": "\n

Source files for the input sidecar captions used during the transcoding\n process. To omit all sidecar captions, leave CaptionSources blank.

\n " + }, + "CaptionFormats": { + "shape_name": "CaptionFormats", + "type": "list", + "members": { + "shape_name": "CaptionFormat", + "type": "structure", + "members": { + "Format": { + "shape_name": "CaptionFormatFormat", + "type": "string", + "pattern": "(^mov-text$)|(^cea-608$)|(^cea-708$)|(^srt$)|(^scc$)|(^webvtt$)|(^dfxp$)", + "documentation": "\n

The format you specify determines whether Elastic Transcoder generates an\n embedded or sidecar caption for this output.

\n \n " + }, + "Pattern": { + "shape_name": "CaptionFormatPattern", + "type": "string", + "pattern": "(^$)|(^.*\\{language\\}.*$)", + "documentation": "\n

The prefix for caption filenames, in the form description-{language}, where:

\n \n

If you don't include {language} in the file name pattern, Elastic Transcoder automatically\n appends \"{language}\" to the value that you specify for the description. In addition,\n Elastic Transcoder automatically appends the count to the end of the segment files.

\n

For example, suppose you're transcoding into srt format. When you enter\n \"Sydney-{language}-sunrise\", and the language of the captions is English\n (en), the name of the first caption file will be Sydney-en-sunrise00000.srt.

\n " + } + }, + "documentation": "\n

The file format of the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.

\n " + }, + "max_length": 4, + "documentation": "\n

The array of file formats for the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.

\n " + } + }, + "documentation": "\n

You can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another.\n All captions must be in UTF-8. Elastic Transcoder supports two types of captions:

\n \n

If you want ttml or smpte-tt compatible captions, specify dfxp as your output format.

\n

Elastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures\n as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does\n not preserve text formatting (for example, italics) during the transcoding process.

\n

To remove captions or leave the captions empty, set Captions to null. To pass through\n existing captions unchanged, set the MergePolicy to MergeRetain,\n and pass in a null CaptionSources array.

\n

For more information on embedded files, see the Subtitles Wikipedia page.

\n

For more information on sidecar files, see the Extensible Metadata Platform and Sidecar file\n Wikipedia pages.

\n " } }, "documentation": "\n

The CreateJobOutput structure.

\n " @@ -757,7 +915,7 @@ "shape_name": "Time", "type": "string", "pattern": "(^\\d{1,5}(\\.\\d{0,3})?$)|(^([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", - "documentation": "\n

The duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.

\n

If you specify a value longer than the duration of the input file , Elastic Transcoder transcodes \n the file and returns a warning message.

\n " + "documentation": "\n

The duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.

\n

If you specify a value longer than the duration of the input file, Elastic Transcoder transcodes \n the file and returns a warning message.

\n " } }, "documentation": "\n

Settings that determine when a clip begins and how long it lasts.

\n " @@ -765,7 +923,85 @@ }, "documentation": "\n

Settings for one clip in a composition. All jobs in a playlist must have the same clip settings.

\n " }, - "documentation": "\n

You can create an output file that contains an excerpt from the input file. This excerpt, called a clip, can come from the beginning, middle, or end of the file. The Composition object contains settings for the clips that make up an output file. For the current release, you can only specify settings for a single clip per output file. The Composition object cannot be null.

\n " + "documentation": "\n

You can create an output file that contains an excerpt from the input file. This \n excerpt, called a clip, can come from the beginning, middle, or end of the file. \n The Composition object contains settings for the clips that make up an output \n file. For the current release, you can only specify settings for a single clip \n per output file. The Composition object cannot be null.

\n " + }, + "Captions": { + "shape_name": "Captions", + "type": "structure", + "members": { + "MergePolicy": { + "shape_name": "CaptionMergePolicy", + "type": "string", + "pattern": "(^MergeOverride$)|(^MergeRetain$)|(^Override$)", + "documentation": "\n

A policy that determines how Elastic Transcoder handles the existence of multiple captions.

\n \n

MergePolicy cannot be null.

\n " + }, + "CaptionSources": { + "shape_name": "CaptionSources", + "type": "list", + "members": { + "shape_name": "CaptionSource", + "type": "structure", + "members": { + "Key": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\n

The name of the sidecar caption file that you want Elastic Transcoder to include in the\n output file.

\n " + }, + "Language": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\n

A string that specifies the language of the caption. Specify this as one of:

\n \n

For more information on ISO language codes and language names, see the List of ISO \n 639-1 codes.

\n " + }, + "TimeOffset": { + "shape_name": "TimeOffset", + "type": "string", + "pattern": "(^[+-]?\\d{1,5}(\\.\\d{0,3})?$)|(^[+-]?([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", + "documentation": "\n

For clip generation or captions that do not start at the same time\n as the associated video file, the TimeOffset tells Elastic Transcoder how much of the video to encode\n before including captions.

\n

Specify the TimeOffset in the form [+-]SS.sss or [+-]HH:mm:SS.ss.

\n " + }, + "Label": { + "shape_name": "Name", + "type": "string", + "min_length": 1, + "max_length": 40, + "documentation": "\n

The label of the caption shown in the player when choosing\n a language. We recommend that you put the caption language name here, \n in the language of the captions.

\n " + } + }, + "documentation": "\n

A source file for the input sidecar captions used during the transcoding\n process.

\n " + }, + "max_length": 20, + "documentation": "\n

Source files for the input sidecar captions used during the transcoding\n process. To omit all sidecar captions, leave CaptionSources blank.

\n " + }, + "CaptionFormats": { + "shape_name": "CaptionFormats", + "type": "list", + "members": { + "shape_name": "CaptionFormat", + "type": "structure", + "members": { + "Format": { + "shape_name": "CaptionFormatFormat", + "type": "string", + "pattern": "(^mov-text$)|(^cea-608$)|(^cea-708$)|(^srt$)|(^scc$)|(^webvtt$)|(^dfxp$)", + "documentation": "\n

The format you specify determines whether Elastic Transcoder generates an\n embedded or sidecar caption for this output.

\n \n " + }, + "Pattern": { + "shape_name": "CaptionFormatPattern", + "type": "string", + "pattern": "(^$)|(^.*\\{language\\}.*$)", + "documentation": "\n

The prefix for caption filenames, in the form description-{language}, where:

\n \n

If you don't include {language} in the file name pattern, Elastic Transcoder automatically\n appends \"{language}\" to the value that you specify for the description. In addition,\n Elastic Transcoder automatically appends the count to the end of the segment files.

\n

For example, suppose you're transcoding into srt format. When you enter\n \"Sydney-{language}-sunrise\", and the language of the captions is English\n (en), the name of the first caption file will be Sydney-en-sunrise00000.srt.

\n " + } + }, + "documentation": "\n

The file format of the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.

\n " + }, + "max_length": 4, + "documentation": "\n

The array of file formats for the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.

\n " + } + }, + "documentation": "\n

You can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another.\n All captions must be in UTF-8. Elastic Transcoder supports two types of captions:

\n \n

If you want ttml or smpte-tt compatible captions, specify dfxp as your output format.

\n

Elastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures\n as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does\n not preserve text formatting (for example, italics) during the transcoding process.

\n

To remove captions or leave the captions empty, set Captions to null. To pass through\n existing captions unchanged, set the MergePolicy to MergeRetain,\n and pass in a null CaptionSources array.

\n

For more information on embedded files, see the Subtitles Wikipedia page.

\n

For more information on sidecar files, see the Extensible Metadata Platform and Sidecar file\n Wikipedia pages.

\n " } }, "documentation": "\n

If you specified one output for a job, information about that output. If you specified\n multiple outputs for a job, the Output object lists information about the first output.\n This duplicates the information that is listed for the first output in the Outputs\n object.

\n

Outputs recommended instead. A section of the request or response\n body that provides information about the transcoded (target) file.

\n " @@ -952,7 +1188,7 @@ "shape_name": "Time", "type": "string", "pattern": "(^\\d{1,5}(\\.\\d{0,3})?$)|(^([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", - "documentation": "\n

The duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.

\n

If you specify a value longer than the duration of the input file , Elastic Transcoder transcodes \n the file and returns a warning message.

\n " + "documentation": "\n

The duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.

\n

If you specify a value longer than the duration of the input file, Elastic Transcoder transcodes \n the file and returns a warning message.

\n " } }, "documentation": "\n

Settings that determine when a clip begins and how long it lasts.

\n " @@ -960,7 +1196,85 @@ }, "documentation": "\n

Settings for one clip in a composition. All jobs in a playlist must have the same clip settings.

\n " }, - "documentation": "\n

You can create an output file that contains an excerpt from the input file. This excerpt, called a clip, can come from the beginning, middle, or end of the file. The Composition object contains settings for the clips that make up an output file. For the current release, you can only specify settings for a single clip per output file. The Composition object cannot be null.

\n " + "documentation": "\n

You can create an output file that contains an excerpt from the input file. This \n excerpt, called a clip, can come from the beginning, middle, or end of the file. \n The Composition object contains settings for the clips that make up an output \n file. For the current release, you can only specify settings for a single clip \n per output file. The Composition object cannot be null.

\n " + }, + "Captions": { + "shape_name": "Captions", + "type": "structure", + "members": { + "MergePolicy": { + "shape_name": "CaptionMergePolicy", + "type": "string", + "pattern": "(^MergeOverride$)|(^MergeRetain$)|(^Override$)", + "documentation": "\n

A policy that determines how Elastic Transcoder handles the existence of multiple captions.

\n \n

MergePolicy cannot be null.

\n " + }, + "CaptionSources": { + "shape_name": "CaptionSources", + "type": "list", + "members": { + "shape_name": "CaptionSource", + "type": "structure", + "members": { + "Key": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\n

The name of the sidecar caption file that you want Elastic Transcoder to include in the\n output file.

\n " + }, + "Language": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\n

A string that specifies the language of the caption. Specify this as one of:

\n \n

For more information on ISO language codes and language names, see the List of ISO \n 639-1 codes.

\n " + }, + "TimeOffset": { + "shape_name": "TimeOffset", + "type": "string", + "pattern": "(^[+-]?\\d{1,5}(\\.\\d{0,3})?$)|(^[+-]?([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", + "documentation": "\n

For clip generation or captions that do not start at the same time\n as the associated video file, the TimeOffset tells Elastic Transcoder how much of the video to encode\n before including captions.

\n

Specify the TimeOffset in the form [+-]SS.sss or [+-]HH:mm:SS.ss.

\n " + }, + "Label": { + "shape_name": "Name", + "type": "string", + "min_length": 1, + "max_length": 40, + "documentation": "\n

The label of the caption shown in the player when choosing\n a language. We recommend that you put the caption language name here, \n in the language of the captions.

\n " + } + }, + "documentation": "\n

A source file for the input sidecar captions used during the transcoding\n process.

\n " + }, + "max_length": 20, + "documentation": "\n

Source files for the input sidecar captions used during the transcoding\n process. To omit all sidecar captions, leave CaptionSources blank.

\n " + }, + "CaptionFormats": { + "shape_name": "CaptionFormats", + "type": "list", + "members": { + "shape_name": "CaptionFormat", + "type": "structure", + "members": { + "Format": { + "shape_name": "CaptionFormatFormat", + "type": "string", + "pattern": "(^mov-text$)|(^cea-608$)|(^cea-708$)|(^srt$)|(^scc$)|(^webvtt$)|(^dfxp$)", + "documentation": "\n

The format you specify determines whether Elastic Transcoder generates an\n embedded or sidecar caption for this output.

\n \n " + }, + "Pattern": { + "shape_name": "CaptionFormatPattern", + "type": "string", + "pattern": "(^$)|(^.*\\{language\\}.*$)", + "documentation": "\n

The prefix for caption filenames, in the form description-{language}, where:

\n \n

If you don't include {language} in the file name pattern, Elastic Transcoder automatically\n appends \"{language}\" to the value that you specify for the description. In addition,\n Elastic Transcoder automatically appends the count to the end of the segment files.

\n

For example, suppose you're transcoding into srt format. When you enter\n \"Sydney-{language}-sunrise\", and the language of the captions is English\n (en), the name of the first caption file will be Sydney-en-sunrise00000.srt.

\n " + } + }, + "documentation": "\n

The file format of the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.

\n " + }, + "max_length": 4, + "documentation": "\n

The array of file formats for the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.

\n " + } + }, + "documentation": "\n

You can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another.\n All captions must be in UTF-8. Elastic Transcoder supports two types of captions:

\n \n

If you want ttml or smpte-tt compatible captions, specify dfxp as your output format.

\n

Elastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures\n as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does\n not preserve text formatting (for example, italics) during the transcoding process.

\n

To remove captions or leave the captions empty, set Captions to null. To pass through\n existing captions unchanged, set the MergePolicy to MergeRetain,\n and pass in a null CaptionSources array.

\n

For more information on embedded files, see the Subtitles Wikipedia page.

\n

For more information on sidecar files, see the Extensible Metadata Platform and Sidecar file\n Wikipedia pages.

\n " } }, "documentation": "\n

Outputs recommended instead.If you specified one output for a job,\n information about that output. If you specified multiple outputs for a job, the\n Output object lists information about the first output. This duplicates\n the information that is listed for the first output in the Outputs\n object.

\n " @@ -1093,13 +1407,15 @@ "type": "string", "min_length": 1, "max_length": 40, - "documentation": "\n

The name of the pipeline. We recommend that the name be unique within the AWS account,\n but uniqueness is not enforced.

\n

Constraints: Maximum 40 characters.

\n " + "documentation": "\n

The name of the pipeline. We recommend that the name be unique within the AWS account,\n but uniqueness is not enforced.

\n

Constraints: Maximum 40 characters.

\n ", + "required": true }, "InputBucket": { "shape_name": "BucketName", "type": "string", "pattern": "^(\\w|\\.|-){1,255}$", - "documentation": "\n

The Amazon S3 bucket in which you saved the media files that you want to transcode.

\n " + "documentation": "\n

The Amazon S3 bucket in which you saved the media files that you want to transcode.

\n ", + "required": true }, "OutputBucket": { "shape_name": "BucketName", @@ -1111,7 +1427,8 @@ "shape_name": "Role", "type": "string", "pattern": "^arn:aws:iam::\\w{12}:role/.+$", - "documentation": "\n

The IAM Amazon Resource Name (ARN) for the role that you want Elastic Transcoder to use to create the\n pipeline.

\n " + "documentation": "\n

The IAM Amazon Resource Name (ARN) for the role that you want Elastic Transcoder to use to create the\n pipeline.

\n ", + "required": true }, "Notifications": { "shape_name": "Notifications", @@ -1518,7 +1835,8 @@ "type": "string", "min_length": 1, "max_length": 40, - "documentation": "\n

The name of the preset. We recommend that the name be unique within the AWS account, but\n uniqueness is not enforced.

\n " + "documentation": "\n

The name of the preset. We recommend that the name be unique within the AWS account, but\n uniqueness is not enforced.

\n ", + "required": true }, "Description": { "shape_name": "Description", @@ -1531,7 +1849,8 @@ "shape_name": "PresetContainer", "type": "string", "pattern": "(^mp4$)|(^ts$)|(^webm$)|(^mp3$)|(^ogg$)", - "documentation": "\n

The container type for the output file. Valid values include mp3, \n mp4, ogg, ts, and webm.

\n " + "documentation": "\n

The container type for the output file. Valid values include mp3, \n mp4, ogg, ts, and webm.

\n ", + "required": true }, "Video": { "shape_name": "VideoParameters", @@ -1652,13 +1971,13 @@ "MaxWidth": { "shape_name": "PixelsOrPercent", "type": "string", - "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{2,4}?px$)", + "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{1,4}?px$)", "documentation": "\n

The maximum width of the watermark in one of the following formats:

\n " }, "MaxHeight": { "shape_name": "PixelsOrPercent", "type": "string", - "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{2,4}?px$)", + "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{1,4}?px$)", "documentation": "\n

The maximum height of the watermark in one of the following formats:

If you specify the value in pixels, it must be less than or equal to the value of\n MaxHeight.

\n " }, "SizingPolicy": { @@ -1676,7 +1995,7 @@ "HorizontalOffset": { "shape_name": "PixelsOrPercent", "type": "string", - "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{2,4}?px$)", + "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{1,4}?px$)", "documentation": "\n

The amount by which you want the horizontal position of the watermark to be offset from\n the position specified by HorizontalAlign:

For example, if you specify Left for HorizontalAlign and 5px for\n HorizontalOffset, the left side of the watermark appears 5 pixels from\n the left border of the output video.

\n

HorizontalOffset is only valid when the value of\n HorizontalAlign is Left or Right. If you\n specify an offset that causes the watermark to extend beyond the left or right border\n and Elastic Transcoder has not added black bars, the watermark is cropped. If Elastic\n Transcoder has added black bars, the watermark extends into the black bars. If the\n watermark extends beyond the black bars, it is cropped.

\n

Use the value of Target to specify whether you want to include the black\n bars that are added by Elastic Transcoder, if any, in the offset calculation.

\n " }, "VerticalAlign": { @@ -1688,7 +2007,7 @@ "VerticalOffset": { "shape_name": "PixelsOrPercent", "type": "string", - "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{2,4}?px$)", + "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{1,4}?px$)", "documentation": "\n VerticalOffset\n

The amount by which you want the vertical position of the watermark to be offset from the\n position specified by VerticalAlign:

For example, if you specify Top for VerticalAlign and\n 5px for VerticalOffset, the top of the watermark appears 5\n pixels from the top border of the output video.

\n

VerticalOffset is only valid when the value of VerticalAlign is Top or\n Bottom.

\n

If you specify an offset that causes the watermark to extend beyond the top or bottom\n border and Elastic Transcoder has not added black bars, the watermark is cropped. If\n Elastic Transcoder has added black bars, the watermark extends into the black bars. If\n the watermark extends beyond the black bars, it is cropped.

\n\n

Use the value of Target to specify whether you want Elastic Transcoder to\n include the black bars that are added by Elastic Transcoder, if any, in the offset\n calculation.

\n " }, "Opacity": { @@ -1747,7 +2066,7 @@ "shape_name": "AudioCodecProfile", "type": "string", "pattern": "(^auto$)|(^AAC-LC$)|(^HE-AAC$)|(^HE-AACv2$)", - "documentation": "\n

If you specified AAC for Audio:Codec, choose the AAC profile for the output file.\n Elastic Transcoder supports the following profiles:

\n \n

If you created any presets before AAC profiles were added, Elastic Transcoder automatically updated\n your presets to use AAC-LC. You can change the value as required.

\n " + "documentation": "\n

You can only choose an audio profile when you specify AAC for the value of Audio:Codec.

\n

Specify the AAC profile for the output file. Elastic Transcoder supports the following profiles:

\n \n

If you created any presets before AAC profiles were added, Elastic Transcoder automatically updated\n your presets to use AAC-LC. You can change the value as required.

\n " } }, "documentation": "\n

If you specified AAC for Audio:Codec, this is the AAC \n compression profile to use. Valid values include:

\n

auto, AAC-LC, HE-AAC, HE-AACv2

\n

If you specify auto, Elastic Transcoder chooses a profile based on the bit rate of the output file.

\n " @@ -1888,7 +2207,7 @@ "shape_name": "AudioCodecProfile", "type": "string", "pattern": "(^auto$)|(^AAC-LC$)|(^HE-AAC$)|(^HE-AACv2$)", - "documentation": "\n

If you specified AAC for Audio:Codec, choose the AAC profile for the output file.\n Elastic Transcoder supports the following profiles:

\n \n

If you created any presets before AAC profiles were added, Elastic Transcoder automatically updated\n your presets to use AAC-LC. You can change the value as required.

\n " + "documentation": "\n

You can only choose an audio profile when you specify AAC for the value of Audio:Codec.

\n

Specify the AAC profile for the output file. Elastic Transcoder supports the following profiles:

\n \n

If you created any presets before AAC profiles were added, Elastic Transcoder automatically updated\n your presets to use AAC-LC. You can change the value as required.

\n " } }, "documentation": "\n

If you specified AAC for Audio:Codec, this is the AAC \n compression profile to use. Valid values include:

\n

auto, AAC-LC, HE-AAC, HE-AACv2

\n

If you specify auto, Elastic Transcoder chooses a profile based on the bit rate of the output file.

\n " @@ -2015,13 +2334,13 @@ "MaxWidth": { "shape_name": "PixelsOrPercent", "type": "string", - "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{2,4}?px$)", + "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{1,4}?px$)", "documentation": "\n

The maximum width of the watermark in one of the following formats:

\n " }, "MaxHeight": { "shape_name": "PixelsOrPercent", "type": "string", - "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{2,4}?px$)", + "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{1,4}?px$)", "documentation": "\n

The maximum height of the watermark in one of the following formats:

If you specify the value in pixels, it must be less than or equal to the value of\n MaxHeight.

\n " }, "SizingPolicy": { @@ -2039,7 +2358,7 @@ "HorizontalOffset": { "shape_name": "PixelsOrPercent", "type": "string", - "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{2,4}?px$)", + "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{1,4}?px$)", "documentation": "\n

The amount by which you want the horizontal position of the watermark to be offset from\n the position specified by HorizontalAlign:

For example, if you specify Left for HorizontalAlign and 5px for\n HorizontalOffset, the left side of the watermark appears 5 pixels from\n the left border of the output video.

\n

HorizontalOffset is only valid when the value of\n HorizontalAlign is Left or Right. If you\n specify an offset that causes the watermark to extend beyond the left or right border\n and Elastic Transcoder has not added black bars, the watermark is cropped. If Elastic\n Transcoder has added black bars, the watermark extends into the black bars. If the\n watermark extends beyond the black bars, it is cropped.

\n

Use the value of Target to specify whether you want to include the black\n bars that are added by Elastic Transcoder, if any, in the offset calculation.

\n " }, "VerticalAlign": { @@ -2051,7 +2370,7 @@ "VerticalOffset": { "shape_name": "PixelsOrPercent", "type": "string", - "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{2,4}?px$)", + "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{1,4}?px$)", "documentation": "\n VerticalOffset\n

The amount by which you want the vertical position of the watermark to be offset from the\n position specified by VerticalAlign:

For example, if you specify Top for VerticalAlign and\n 5px for VerticalOffset, the top of the watermark appears 5\n pixels from the top border of the output video.

\n

VerticalOffset is only valid when the value of VerticalAlign is Top or\n Bottom.

\n

If you specify an offset that causes the watermark to extend beyond the top or bottom\n border and Elastic Transcoder has not added black bars, the watermark is cropped. If\n Elastic Transcoder has added black bars, the watermark extends into the black bars. If\n the watermark extends beyond the black bars, it is cropped.

\n\n

Use the value of Target to specify whether you want Elastic Transcoder to\n include the black bars that are added by Elastic Transcoder, if any, in the offset\n calculation.

\n " }, "Opacity": { @@ -2196,8 +2515,8 @@ "type": "string", "pattern": "^\\d{13}-\\w{6}$", "documentation": "\n

The identifier of the pipeline that you want to delete.

\n ", - "location": "uri", - "required": true + "required": true, + "location": "uri" } }, "documentation": "\n

The DeletePipelineRequest structure.

\n " @@ -2264,8 +2583,8 @@ "type": "string", "pattern": "^\\d{13}-\\w{6}$", "documentation": "\n

The identifier of the preset that you want to delete.

\n ", - "location": "uri", - "required": true + "required": true, + "location": "uri" } }, "documentation": "\n

The DeletePresetRequest structure.

\n " @@ -2325,8 +2644,8 @@ "type": "string", "pattern": "^\\d{13}-\\w{6}$", "documentation": "\n

The ID of the pipeline for which you want to get job information.

\n ", - "location": "uri", - "required": true + "required": true, + "location": "uri" }, "Ascending": { "shape_name": "Ascending", @@ -2596,7 +2915,7 @@ "shape_name": "Time", "type": "string", "pattern": "(^\\d{1,5}(\\.\\d{0,3})?$)|(^([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", - "documentation": "\n

The duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.

\n

If you specify a value longer than the duration of the input file , Elastic Transcoder transcodes \n the file and returns a warning message.

\n " + "documentation": "\n

The duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.

\n

If you specify a value longer than the duration of the input file, Elastic Transcoder transcodes \n the file and returns a warning message.

\n " } }, "documentation": "\n

Settings that determine when a clip begins and how long it lasts.

\n " @@ -2604,7 +2923,85 @@ }, "documentation": "\n

Settings for one clip in a composition. All jobs in a playlist must have the same clip settings.

\n " }, - "documentation": "\n

You can create an output file that contains an excerpt from the input file. This excerpt, called a clip, can come from the beginning, middle, or end of the file. The Composition object contains settings for the clips that make up an output file. For the current release, you can only specify settings for a single clip per output file. The Composition object cannot be null.

\n " + "documentation": "\n

You can create an output file that contains an excerpt from the input file. This \n excerpt, called a clip, can come from the beginning, middle, or end of the file. \n The Composition object contains settings for the clips that make up an output \n file. For the current release, you can only specify settings for a single clip \n per output file. The Composition object cannot be null.

\n " + }, + "Captions": { + "shape_name": "Captions", + "type": "structure", + "members": { + "MergePolicy": { + "shape_name": "CaptionMergePolicy", + "type": "string", + "pattern": "(^MergeOverride$)|(^MergeRetain$)|(^Override$)", + "documentation": "\n

A policy that determines how Elastic Transcoder handles the existence of multiple captions.

\n \n

MergePolicy cannot be null.

\n " + }, + "CaptionSources": { + "shape_name": "CaptionSources", + "type": "list", + "members": { + "shape_name": "CaptionSource", + "type": "structure", + "members": { + "Key": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\n

The name of the sidecar caption file that you want Elastic Transcoder to include in the\n output file.

\n " + }, + "Language": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\n

A string that specifies the language of the caption. Specify this as one of:

\n \n

For more information on ISO language codes and language names, see the List of ISO \n 639-1 codes.

\n " + }, + "TimeOffset": { + "shape_name": "TimeOffset", + "type": "string", + "pattern": "(^[+-]?\\d{1,5}(\\.\\d{0,3})?$)|(^[+-]?([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", + "documentation": "\n

For clip generation or captions that do not start at the same time\n as the associated video file, the TimeOffset tells Elastic Transcoder how much of the video to encode\n before including captions.

\n

Specify the TimeOffset in the form [+-]SS.sss or [+-]HH:mm:SS.ss.

\n " + }, + "Label": { + "shape_name": "Name", + "type": "string", + "min_length": 1, + "max_length": 40, + "documentation": "\n

The label of the caption shown in the player when choosing\n a language. We recommend that you put the caption language name here, \n in the language of the captions.

\n " + } + }, + "documentation": "\n

A source file for the input sidecar captions used during the transcoding\n process.

\n " + }, + "max_length": 20, + "documentation": "\n

Source files for the input sidecar captions used during the transcoding\n process. To omit all sidecar captions, leave CaptionSources blank.

\n " + }, + "CaptionFormats": { + "shape_name": "CaptionFormats", + "type": "list", + "members": { + "shape_name": "CaptionFormat", + "type": "structure", + "members": { + "Format": { + "shape_name": "CaptionFormatFormat", + "type": "string", + "pattern": "(^mov-text$)|(^cea-608$)|(^cea-708$)|(^srt$)|(^scc$)|(^webvtt$)|(^dfxp$)", + "documentation": "\n

The format you specify determines whether Elastic Transcoder generates an\n embedded or sidecar caption for this output.

\n \n " + }, + "Pattern": { + "shape_name": "CaptionFormatPattern", + "type": "string", + "pattern": "(^$)|(^.*\\{language\\}.*$)", + "documentation": "\n

The prefix for caption filenames, in the form description-{language}, where:

\n \n

If you don't include {language} in the file name pattern, Elastic Transcoder automatically\n appends \"{language}\" to the value that you specify for the description. In addition,\n Elastic Transcoder automatically appends the count to the end of the segment files.

\n

For example, suppose you're transcoding into srt format. When you enter\n \"Sydney-{language}-sunrise\", and the language of the captions is English\n (en), the name of the first caption file will be Sydney-en-sunrise00000.srt.

\n " + } + }, + "documentation": "\n

The file format of the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.

\n " + }, + "max_length": 4, + "documentation": "\n

The array of file formats for the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.

\n " + } + }, + "documentation": "\n

You can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another.\n All captions must be in UTF-8. Elastic Transcoder supports two types of captions:

\n \n

If you want ttml or smpte-tt compatible captions, specify dfxp as your output format.

\n

Elastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures\n as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does\n not preserve text formatting (for example, italics) during the transcoding process.

\n

To remove captions or leave the captions empty, set Captions to null. To pass through\n existing captions unchanged, set the MergePolicy to MergeRetain,\n and pass in a null CaptionSources array.

\n

For more information on embedded files, see the Subtitles Wikipedia page.

\n

For more information on sidecar files, see the Extensible Metadata Platform and Sidecar file\n Wikipedia pages.

\n " } }, "documentation": "\n

If you specified one output for a job, information about that output. If you specified\n multiple outputs for a job, the Output object lists information about the first output.\n This duplicates the information that is listed for the first output in the Outputs\n object.

\n

Outputs recommended instead. A section of the request or response\n body that provides information about the transcoded (target) file.

\n " @@ -2791,7 +3188,7 @@ "shape_name": "Time", "type": "string", "pattern": "(^\\d{1,5}(\\.\\d{0,3})?$)|(^([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", - "documentation": "\n

The duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.

\n

If you specify a value longer than the duration of the input file , Elastic Transcoder transcodes \n the file and returns a warning message.

\n " + "documentation": "\n

The duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.

\n

If you specify a value longer than the duration of the input file, Elastic Transcoder transcodes \n the file and returns a warning message.

\n " } }, "documentation": "\n

Settings that determine when a clip begins and how long it lasts.

\n " @@ -2799,7 +3196,85 @@ }, "documentation": "\n

Settings for one clip in a composition. All jobs in a playlist must have the same clip settings.

\n " }, - "documentation": "\n

You can create an output file that contains an excerpt from the input file. This excerpt, called a clip, can come from the beginning, middle, or end of the file. The Composition object contains settings for the clips that make up an output file. For the current release, you can only specify settings for a single clip per output file. The Composition object cannot be null.

\n " + "documentation": "\n

You can create an output file that contains an excerpt from the input file. This \n excerpt, called a clip, can come from the beginning, middle, or end of the file. \n The Composition object contains settings for the clips that make up an output \n file. For the current release, you can only specify settings for a single clip \n per output file. The Composition object cannot be null.

\n " + }, + "Captions": { + "shape_name": "Captions", + "type": "structure", + "members": { + "MergePolicy": { + "shape_name": "CaptionMergePolicy", + "type": "string", + "pattern": "(^MergeOverride$)|(^MergeRetain$)|(^Override$)", + "documentation": "\n

A policy that determines how Elastic Transcoder handles the existence of multiple captions.

\n \n

MergePolicy cannot be null.

\n " + }, + "CaptionSources": { + "shape_name": "CaptionSources", + "type": "list", + "members": { + "shape_name": "CaptionSource", + "type": "structure", + "members": { + "Key": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\n

The name of the sidecar caption file that you want Elastic Transcoder to include in the\n output file.

\n " + }, + "Language": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\n

A string that specifies the language of the caption. Specify this as one of:

\n \n

For more information on ISO language codes and language names, see the List of ISO \n 639-1 codes.

\n " + }, + "TimeOffset": { + "shape_name": "TimeOffset", + "type": "string", + "pattern": "(^[+-]?\\d{1,5}(\\.\\d{0,3})?$)|(^[+-]?([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", + "documentation": "\n

For clip generation or captions that do not start at the same time\n as the associated video file, the TimeOffset tells Elastic Transcoder how much of the video to encode\n before including captions.

\n

Specify the TimeOffset in the form [+-]SS.sss or [+-]HH:mm:SS.ss.

\n " + }, + "Label": { + "shape_name": "Name", + "type": "string", + "min_length": 1, + "max_length": 40, + "documentation": "\n

The label of the caption shown in the player when choosing\n a language. We recommend that you put the caption language name here, \n in the language of the captions.

\n " + } + }, + "documentation": "\n

A source file for the input sidecar captions used during the transcoding\n process.

\n " + }, + "max_length": 20, + "documentation": "\n

Source files for the input sidecar captions used during the transcoding\n process. To omit all sidecar captions, leave CaptionSources blank.

\n " + }, + "CaptionFormats": { + "shape_name": "CaptionFormats", + "type": "list", + "members": { + "shape_name": "CaptionFormat", + "type": "structure", + "members": { + "Format": { + "shape_name": "CaptionFormatFormat", + "type": "string", + "pattern": "(^mov-text$)|(^cea-608$)|(^cea-708$)|(^srt$)|(^scc$)|(^webvtt$)|(^dfxp$)", + "documentation": "\n

The format you specify determines whether Elastic Transcoder generates an\n embedded or sidecar caption for this output.

\n \n " + }, + "Pattern": { + "shape_name": "CaptionFormatPattern", + "type": "string", + "pattern": "(^$)|(^.*\\{language\\}.*$)", + "documentation": "\n

The prefix for caption filenames, in the form description-{language}, where:

\n \n

If you don't include {language} in the file name pattern, Elastic Transcoder automatically\n appends \"{language}\" to the value that you specify for the description. In addition,\n Elastic Transcoder automatically appends the count to the end of the segment files.

\n

For example, suppose you're transcoding into srt format. When you enter\n \"Sydney-{language}-sunrise\", and the language of the captions is English\n (en), the name of the first caption file will be Sydney-en-sunrise00000.srt.

\n " + } + }, + "documentation": "\n

The file format of the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.

\n " + }, + "max_length": 4, + "documentation": "\n

The array of file formats for the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.

\n " + } + }, + "documentation": "\n

You can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another.\n All captions must be in UTF-8. Elastic Transcoder supports two types of captions:

\n \n

If you want ttml or smpte-tt compatible captions, specify dfxp as your output format.

\n

Elastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures\n as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does\n not preserve text formatting (for example, italics) during the transcoding process.

\n

To remove captions or leave the captions empty, set Captions to null. To pass through\n existing captions unchanged, set the MergePolicy to MergeRetain,\n and pass in a null CaptionSources array.

\n

For more information on embedded files, see the Subtitles Wikipedia page.

\n

For more information on sidecar files, see the Extensible Metadata Platform and Sidecar file\n Wikipedia pages.

\n " } }, "documentation": "\n

Outputs recommended instead. If you specified one output for a job,\n        information about that output. If you specified multiple outputs for a job, the\n        Output object lists information about the first output. This duplicates\n        the information that is listed for the first output in the Outputs\n        object.

\n " @@ -2939,8 +3414,8 @@ "type": "string", "pattern": "(^Submitted$)|(^Progressing$)|(^Complete$)|(^Canceled$)|(^Error$)", "documentation": "\n

To get information about all of the jobs associated with the current AWS account that\n have a given status, specify the following status: Submitted,\n Progressing, Complete, Canceled, or\n Error.

\n ", - "location": "uri", - "required": true + "required": true, + "location": "uri" }, "Ascending": { "shape_name": "Ascending", @@ -3210,7 +3685,7 @@ "shape_name": "Time", "type": "string", "pattern": "(^\\d{1,5}(\\.\\d{0,3})?$)|(^([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", - "documentation": "\n

The duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.

\n

If you specify a value longer than the duration of the input file , Elastic Transcoder transcodes \n the file and returns a warning message.

\n " + "documentation": "\n

The duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.

\n

If you specify a value longer than the duration of the input file, Elastic Transcoder transcodes \n the file and returns a warning message.

\n " } }, "documentation": "\n

Settings that determine when a clip begins and how long it lasts.

\n " @@ -3218,7 +3693,85 @@ }, "documentation": "\n

Settings for one clip in a composition. All jobs in a playlist must have the same clip settings.

\n " }, - "documentation": "\n

You can create an output file that contains an excerpt from the input file. This excerpt, called a clip, can come from the beginning, middle, or end of the file. The Composition object contains settings for the clips that make up an output file. For the current release, you can only specify settings for a single clip per output file. The Composition object cannot be null.

\n " + "documentation": "\n

You can create an output file that contains an excerpt from the input file. This \n excerpt, called a clip, can come from the beginning, middle, or end of the file. \n The Composition object contains settings for the clips that make up an output \n file. For the current release, you can only specify settings for a single clip \n per output file. The Composition object cannot be null.

\n " + }, + "Captions": { + "shape_name": "Captions", + "type": "structure", + "members": { + "MergePolicy": { + "shape_name": "CaptionMergePolicy", + "type": "string", + "pattern": "(^MergeOverride$)|(^MergeRetain$)|(^Override$)", + "documentation": "\n

A policy that determines how Elastic Transcoder handles the existence of multiple captions.

\n \n

MergePolicy cannot be null.

\n " + }, + "CaptionSources": { + "shape_name": "CaptionSources", + "type": "list", + "members": { + "shape_name": "CaptionSource", + "type": "structure", + "members": { + "Key": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\n

The name of the sidecar caption file that you want Elastic Transcoder to include in the\n output file.

\n " + }, + "Language": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\n

A string that specifies the language of the caption. Specify this as one of:

\n \n

For more information on ISO language codes and language names, see the List of ISO \n 639-1 codes.

\n " + }, + "TimeOffset": { + "shape_name": "TimeOffset", + "type": "string", + "pattern": "(^[+-]?\\d{1,5}(\\.\\d{0,3})?$)|(^[+-]?([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", + "documentation": "\n

For clip generation or captions that do not start at the same time\n as the associated video file, the TimeOffset tells Elastic Transcoder how much of the video to encode\n before including captions.

\n

Specify the TimeOffset in the form [+-]SS.sss or [+-]HH:mm:SS.ss.

\n " + }, + "Label": { + "shape_name": "Name", + "type": "string", + "min_length": 1, + "max_length": 40, + "documentation": "\n

The label of the caption shown in the player when choosing\n a language. We recommend that you put the caption language name here, \n in the language of the captions.

\n " + } + }, + "documentation": "\n

A source file for the input sidecar captions used during the transcoding\n process.

\n " + }, + "max_length": 20, + "documentation": "\n

Source files for the input sidecar captions used during the transcoding\n process. To omit all sidecar captions, leave CaptionSources blank.

\n " + }, + "CaptionFormats": { + "shape_name": "CaptionFormats", + "type": "list", + "members": { + "shape_name": "CaptionFormat", + "type": "structure", + "members": { + "Format": { + "shape_name": "CaptionFormatFormat", + "type": "string", + "pattern": "(^mov-text$)|(^cea-608$)|(^cea-708$)|(^srt$)|(^scc$)|(^webvtt$)|(^dfxp$)", + "documentation": "\n

The format you specify determines whether Elastic Transcoder generates an\n embedded or sidecar caption for this output.

\n \n " + }, + "Pattern": { + "shape_name": "CaptionFormatPattern", + "type": "string", + "pattern": "(^$)|(^.*\\{language\\}.*$)", + "documentation": "\n

The prefix for caption filenames, in the form description-{language}, where:

\n \n

If you don't include {language} in the file name pattern, Elastic Transcoder automatically\n appends \"{language}\" to the value that you specify for the description. In addition,\n Elastic Transcoder automatically appends the count to the end of the segment files.

\n

For example, suppose you're transcoding into srt format. When you enter\n \"Sydney-{language}-sunrise\", and the language of the captions is English\n (en), the name of the first caption file will be Sydney-en-sunrise00000.srt.

\n " + } + }, + "documentation": "\n

The file format of the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.

\n " + }, + "max_length": 4, + "documentation": "\n

The array of file formats for the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.

\n " + } + }, + "documentation": "\n

You can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another.\n All captions must be in UTF-8. Elastic Transcoder supports two types of captions:

\n \n

If you want ttml or smpte-tt compatible captions, specify dfxp as your output format.

\n

Elastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures\n as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does\n not preserve text formatting (for example, italics) during the transcoding process.

\n

To remove captions or leave the captions empty, set Captions to null. To pass through\n existing captions unchanged, set the MergePolicy to MergeRetain,\n and pass in a null CaptionSources array.

\n

For more information on embedded files, see the Subtitles Wikipedia page.

\n

For more information on sidecar files, see the Extensible Metadata Platform and Sidecar file\n Wikipedia pages.

\n " } }, "documentation": "\n

If you specified one output for a job, information about that output. If you specified\n multiple outputs for a job, the Output object lists information about the first output.\n This duplicates the information that is listed for the first output in the Outputs\n object.

\n

Outputs recommended instead. A section of the request or response\n body that provides information about the transcoded (target) file.

\n " @@ -3405,7 +3958,7 @@ "shape_name": "Time", "type": "string", "pattern": "(^\\d{1,5}(\\.\\d{0,3})?$)|(^([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", - "documentation": "\n

The duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.

\n

If you specify a value longer than the duration of the input file , Elastic Transcoder transcodes \n the file and returns a warning message.

\n " + "documentation": "\n

The duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.

\n

If you specify a value longer than the duration of the input file, Elastic Transcoder transcodes \n the file and returns a warning message.

\n " } }, "documentation": "\n

Settings that determine when a clip begins and how long it lasts.

\n " @@ -3413,7 +3966,85 @@ }, "documentation": "\n

Settings for one clip in a composition. All jobs in a playlist must have the same clip settings.

\n " }, - "documentation": "\n

You can create an output file that contains an excerpt from the input file. This excerpt, called a clip, can come from the beginning, middle, or end of the file. The Composition object contains settings for the clips that make up an output file. For the current release, you can only specify settings for a single clip per output file. The Composition object cannot be null.

\n " + "documentation": "\n

You can create an output file that contains an excerpt from the input file. This \n excerpt, called a clip, can come from the beginning, middle, or end of the file. \n The Composition object contains settings for the clips that make up an output \n file. For the current release, you can only specify settings for a single clip \n per output file. The Composition object cannot be null.

\n " + }, + "Captions": { + "shape_name": "Captions", + "type": "structure", + "members": { + "MergePolicy": { + "shape_name": "CaptionMergePolicy", + "type": "string", + "pattern": "(^MergeOverride$)|(^MergeRetain$)|(^Override$)", + "documentation": "\n

A policy that determines how Elastic Transcoder handles the existence of multiple captions.

\n \n

MergePolicy cannot be null.

\n " + }, + "CaptionSources": { + "shape_name": "CaptionSources", + "type": "list", + "members": { + "shape_name": "CaptionSource", + "type": "structure", + "members": { + "Key": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\n

The name of the sidecar caption file that you want Elastic Transcoder to include in the\n output file.

\n " + }, + "Language": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\n

A string that specifies the language of the caption. Specify this as one of:

\n \n

For more information on ISO language codes and language names, see the List of ISO \n 639-1 codes.

\n " + }, + "TimeOffset": { + "shape_name": "TimeOffset", + "type": "string", + "pattern": "(^[+-]?\\d{1,5}(\\.\\d{0,3})?$)|(^[+-]?([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", + "documentation": "\n

For clip generation or captions that do not start at the same time\n as the associated video file, the TimeOffset tells Elastic Transcoder how much of the video to encode\n before including captions.

\n

Specify the TimeOffset in the form [+-]SS.sss or [+-]HH:mm:SS.ss.

\n " + }, + "Label": { + "shape_name": "Name", + "type": "string", + "min_length": 1, + "max_length": 40, + "documentation": "\n

The label of the caption shown in the player when choosing\n a language. We recommend that you put the caption language name here, \n in the language of the captions.

\n " + } + }, + "documentation": "\n

A source file for the input sidecar captions used during the transcoding\n process.

\n " + }, + "max_length": 20, + "documentation": "\n

Source files for the input sidecar captions used during the transcoding\n process. To omit all sidecar captions, leave CaptionSources blank.

\n " + }, + "CaptionFormats": { + "shape_name": "CaptionFormats", + "type": "list", + "members": { + "shape_name": "CaptionFormat", + "type": "structure", + "members": { + "Format": { + "shape_name": "CaptionFormatFormat", + "type": "string", + "pattern": "(^mov-text$)|(^cea-608$)|(^cea-708$)|(^srt$)|(^scc$)|(^webvtt$)|(^dfxp$)", + "documentation": "\n

The format you specify determines whether Elastic Transcoder generates an\n embedded or sidecar caption for this output.

\n \n " + }, + "Pattern": { + "shape_name": "CaptionFormatPattern", + "type": "string", + "pattern": "(^$)|(^.*\\{language\\}.*$)", + "documentation": "\n

The prefix for caption filenames, in the form description-{language}, where:

\n \n

If you don't include {language} in the file name pattern, Elastic Transcoder automatically\n appends \"{language}\" to the value that you specify for the description. In addition,\n Elastic Transcoder automatically appends the count to the end of the segment files.

\n

For example, suppose you're transcoding into srt format. When you enter\n \"Sydney-{language}-sunrise\", and the language of the captions is English\n (en), the name of the first caption file will be Sydney-en-sunrise00000.srt.

\n " + } + }, + "documentation": "\n

The file format of the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.

\n " + }, + "max_length": 4, + "documentation": "\n

The array of file formats for the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.

\n " + } + }, + "documentation": "\n

You can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another.\n All captions must be in UTF-8. Elastic Transcoder supports two types of captions:

\n \n

If you want ttml or smpte-tt compatible captions, specify dfxp as your output format.

\n

Elastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures\n as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does\n not preserve text formatting (for example, italics) during the transcoding process.

\n

To remove captions or leave the captions empty, set Captions to null. To pass through\n existing captions unchanged, set the MergePolicy to MergeRetain,\n and pass in a null CaptionSources array.

\n

For more information on embedded files, see the Subtitles Wikipedia page.

\n

For more information on sidecar files, see the Extensible Metadata Platform and Sidecar file\n Wikipedia pages.

\n " } }, "documentation": "\n

Outputs recommended instead. If you specified one output for a job,\n information about that output. If you specified multiple outputs for a job, the\n Output object lists information about the first output. This duplicates\n the information that is listed for the first output in the Outputs\n object.

\n " @@ -3916,7 +4547,7 @@ "shape_name": "AudioCodecProfile", "type": "string", "pattern": "(^auto$)|(^AAC-LC$)|(^HE-AAC$)|(^HE-AACv2$)", - "documentation": "\n

If you specified AAC for Audio:Codec, choose the AAC profile for the output file.\n Elastic Transcoder supports the following profiles:

\n \n

If you created any presets before AAC profiles were added, Elastic Transcoder automatically updated\n your presets to use AAC-LC. You can change the value as required.

\n " + "documentation": "\n

You can only choose an audio profile when you specify AAC for the value of Audio:Codec.

\n

Specify the AAC profile for the output file. Elastic Transcoder supports the following profiles:

\n \n

If you created any presets before AAC profiles were added, Elastic Transcoder automatically updated\n your presets to use AAC-LC. You can change the value as required.

\n " } }, "documentation": "\n

If you specified AAC for Audio:Codec, this is the AAC \n compression profile to use. Valid values include:

\n

auto, AAC-LC, HE-AAC, HE-AACv2

\n

If you specify auto, Elastic Transcoder chooses a profile based on the bit rate of the output file.

\n " @@ -4043,13 +4674,13 @@ "MaxWidth": { "shape_name": "PixelsOrPercent", "type": "string", - "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{2,4}?px$)", + "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{1,4}?px$)", "documentation": "\n

The maximum width of the watermark in one of the following formats:

\n " }, "MaxHeight": { "shape_name": "PixelsOrPercent", "type": "string", - "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{2,4}?px$)", + "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{1,4}?px$)", "documentation": "\n

The maximum height of the watermark in one of the following formats:

If you specify the value in pixels, it must be less than or equal to the value of\n MaxHeight.

\n " }, "SizingPolicy": { @@ -4067,7 +4698,7 @@ "HorizontalOffset": { "shape_name": "PixelsOrPercent", "type": "string", - "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{2,4}?px$)", + "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{1,4}?px$)", "documentation": "\n

The amount by which you want the horizontal position of the watermark to be offset from\n the position specified by HorizontalAlign:

For example, if you specify Left for HorizontalAlign and 5px for\n HorizontalOffset, the left side of the watermark appears 5 pixels from\n the left border of the output video.

\n

HorizontalOffset is only valid when the value of\n HorizontalAlign is Left or Right. If you\n specify an offset that causes the watermark to extend beyond the left or right border\n and Elastic Transcoder has not added black bars, the watermark is cropped. If Elastic\n Transcoder has added black bars, the watermark extends into the black bars. If the\n watermark extends beyond the black bars, it is cropped.

\n

Use the value of Target to specify whether you want to include the black\n bars that are added by Elastic Transcoder, if any, in the offset calculation.

\n " }, "VerticalAlign": { @@ -4079,7 +4710,7 @@ "VerticalOffset": { "shape_name": "PixelsOrPercent", "type": "string", - "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{2,4}?px$)", + "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{1,4}?px$)", "documentation": "\n VerticalOffset\n

The amount by which you want the vertical position of the watermark to be offset from the\n position specified by VerticalAlign:

For example, if you specify Top for VerticalAlign and\n 5px for VerticalOffset, the top of the watermark appears 5\n pixels from the top border of the output video.

\n

VerticalOffset is only valid when the value of VerticalAlign is Top or\n Bottom.

\n

If you specify an offset that causes the watermark to extend beyond the top or bottom\n border and Elastic Transcoder has not added black bars, the watermark is cropped. If\n Elastic Transcoder has added black bars, the watermark extends into the black bars. If\n the watermark extends beyond the black bars, it is cropped.

\n\n

Use the value of Target to specify whether you want Elastic Transcoder to\n include the black bars that are added by Elastic Transcoder, if any, in the offset\n calculation.

\n " }, "Opacity": { @@ -4226,8 +4857,8 @@ "type": "string", "pattern": "^\\d{13}-\\w{6}$", "documentation": "\n

The identifier of the job for which you want to get detailed information.

\n ", - "location": "uri", - "required": true + "required": true, + "location": "uri" } }, "documentation": "\n

The ReadJobRequest structure.

\n " @@ -4480,7 +5111,7 @@ "shape_name": "Time", "type": "string", "pattern": "(^\\d{1,5}(\\.\\d{0,3})?$)|(^([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", - "documentation": "\n

The duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.

\n

If you specify a value longer than the duration of the input file , Elastic Transcoder transcodes \n the file and returns a warning message.

\n " + "documentation": "\n

The duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.

\n

If you specify a value longer than the duration of the input file, Elastic Transcoder transcodes \n the file and returns a warning message.

\n " } }, "documentation": "\n

Settings that determine when a clip begins and how long it lasts.

\n " @@ -4488,7 +5119,85 @@ }, "documentation": "\n

Settings for one clip in a composition. All jobs in a playlist must have the same clip settings.

\n " }, - "documentation": "\n

You can create an output file that contains an excerpt from the input file. This excerpt, called a clip, can come from the beginning, middle, or end of the file. The Composition object contains settings for the clips that make up an output file. For the current release, you can only specify settings for a single clip per output file. The Composition object cannot be null.

\n " + "documentation": "\n

You can create an output file that contains an excerpt from the input file. This \n excerpt, called a clip, can come from the beginning, middle, or end of the file. \n The Composition object contains settings for the clips that make up an output \n file. For the current release, you can only specify settings for a single clip \n per output file. The Composition object cannot be null.

\n " + }, + "Captions": { + "shape_name": "Captions", + "type": "structure", + "members": { + "MergePolicy": { + "shape_name": "CaptionMergePolicy", + "type": "string", + "pattern": "(^MergeOverride$)|(^MergeRetain$)|(^Override$)", + "documentation": "\n

A policy that determines how Elastic Transcoder handles the existence of multiple captions.

\n \n

MergePolicy cannot be null.

\n " + }, + "CaptionSources": { + "shape_name": "CaptionSources", + "type": "list", + "members": { + "shape_name": "CaptionSource", + "type": "structure", + "members": { + "Key": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\n

The name of the sidecar caption file that you want Elastic Transcoder to include in the\n output file.

\n " + }, + "Language": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\n

A string that specifies the language of the caption. Specify this as one of:

\n \n

For more information on ISO language codes and language names, see the List of ISO \n 639-1 codes.

\n " + }, + "TimeOffset": { + "shape_name": "TimeOffset", + "type": "string", + "pattern": "(^[+-]?\\d{1,5}(\\.\\d{0,3})?$)|(^[+-]?([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", + "documentation": "\n

For clip generation or captions that do not start at the same time\n as the associated video file, the TimeOffset tells Elastic Transcoder how much of the video to encode\n before including captions.

\n

Specify the TimeOffset in the form [+-]SS.sss or [+-]HH:mm:SS.ss.

\n " + }, + "Label": { + "shape_name": "Name", + "type": "string", + "min_length": 1, + "max_length": 40, + "documentation": "\n

The label of the caption shown in the player when choosing\n a language. We recommend that you put the caption language name here, \n in the language of the captions.

\n " + } + }, + "documentation": "\n

A source file for the input sidecar captions used during the transcoding\n process.

\n " + }, + "max_length": 20, + "documentation": "\n

Source files for the input sidecar captions used during the transcoding\n process. To omit all sidecar captions, leave CaptionSources blank.

\n " + }, + "CaptionFormats": { + "shape_name": "CaptionFormats", + "type": "list", + "members": { + "shape_name": "CaptionFormat", + "type": "structure", + "members": { + "Format": { + "shape_name": "CaptionFormatFormat", + "type": "string", + "pattern": "(^mov-text$)|(^cea-608$)|(^cea-708$)|(^srt$)|(^scc$)|(^webvtt$)|(^dfxp$)", + "documentation": "\n

The format you specify determines whether Elastic Transcoder generates an\n embedded or sidecar caption for this output.

\n \n " + }, + "Pattern": { + "shape_name": "CaptionFormatPattern", + "type": "string", + "pattern": "(^$)|(^.*\\{language\\}.*$)", + "documentation": "\n

The prefix for caption filenames, in the form description-{language}, where:

\n \n

If you don't include {language} in the file name pattern, Elastic Transcoder automatically\n appends \"{language}\" to the value that you specify for the description. In addition,\n Elastic Transcoder automatically appends the count to the end of the segment files.

\n

For example, suppose you're transcoding into srt format. When you enter\n \"Sydney-{language}-sunrise\", and the language of the captions is English\n (en), the name of the first caption file will be Sydney-en-sunrise00000.srt.

\n " + } + }, + "documentation": "\n

The file format of the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.

\n " + }, + "max_length": 4, + "documentation": "\n

The array of file formats for the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.

\n " + } + }, + "documentation": "\n

You can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another.\n All captions must be in UTF-8. Elastic Transcoder supports two types of captions:

\n \n

If you want ttml or smpte-tt compatible captions, specify dfxp as your output format.

\n

Elastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures\n as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does\n not preserve text formatting (for example, italics) during the transcoding process.

\n

To remove captions or leave the captions empty, set Captions to null. To pass through\n existing captions unchanged, set the MergePolicy to MergeRetain,\n and pass in a null CaptionSources array.

\n

For more information on embedded files, see the Subtitles Wikipedia page.

\n

For more information on sidecar files, see the Extensible Metadata Platform and Sidecar file\n Wikipedia pages.

\n " } }, "documentation": "\n

If you specified one output for a job, information about that output. If you specified\n multiple outputs for a job, the Output object lists information about the first output.\n This duplicates the information that is listed for the first output in the Outputs\n object.

\n

Outputs recommended instead. A section of the request or response\n body that provides information about the transcoded (target) file.

\n " @@ -4675,7 +5384,7 @@ "shape_name": "Time", "type": "string", "pattern": "(^\\d{1,5}(\\.\\d{0,3})?$)|(^([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", - "documentation": "\n

The duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.

\n

If you specify a value longer than the duration of the input file , Elastic Transcoder transcodes \n the file and returns a warning message.

\n " + "documentation": "\n

The duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.

\n

If you specify a value longer than the duration of the input file, Elastic Transcoder transcodes \n the file and returns a warning message.

\n " } }, "documentation": "\n

Settings that determine when a clip begins and how long it lasts.

\n " @@ -4683,7 +5392,85 @@ }, "documentation": "\n

Settings for one clip in a composition. All jobs in a playlist must have the same clip settings.

\n " }, - "documentation": "\n

You can create an output file that contains an excerpt from the input file. This excerpt, called a clip, can come from the beginning, middle, or end of the file. The Composition object contains settings for the clips that make up an output file. For the current release, you can only specify settings for a single clip per output file. The Composition object cannot be null.

\n " + "documentation": "\n

You can create an output file that contains an excerpt from the input file. This \n excerpt, called a clip, can come from the beginning, middle, or end of the file. \n The Composition object contains settings for the clips that make up an output \n file. For the current release, you can only specify settings for a single clip \n per output file. The Composition object cannot be null.

\n " + }, + "Captions": { + "shape_name": "Captions", + "type": "structure", + "members": { + "MergePolicy": { + "shape_name": "CaptionMergePolicy", + "type": "string", + "pattern": "(^MergeOverride$)|(^MergeRetain$)|(^Override$)", + "documentation": "\n

A policy that determines how Elastic Transcoder handles the existence of multiple captions.

\n \n

MergePolicy cannot be null.

\n " + }, + "CaptionSources": { + "shape_name": "CaptionSources", + "type": "list", + "members": { + "shape_name": "CaptionSource", + "type": "structure", + "members": { + "Key": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\n

The name of the sidecar caption file that you want Elastic Transcoder to include in the\n output file.

\n " + }, + "Language": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\n

A string that specifies the language of the caption. Specify this as one of:

\n \n

For more information on ISO language codes and language names, see the List of ISO \n 639-1 codes.

\n " + }, + "TimeOffset": { + "shape_name": "TimeOffset", + "type": "string", + "pattern": "(^[+-]?\\d{1,5}(\\.\\d{0,3})?$)|(^[+-]?([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", + "documentation": "\n

For clip generation or captions that do not start at the same time\n as the associated video file, the TimeOffset tells Elastic Transcoder how much of the video to encode\n before including captions.

\n

Specify the TimeOffset in the form [+-]SS.sss or [+-]HH:mm:SS.ss.

\n " + }, + "Label": { + "shape_name": "Name", + "type": "string", + "min_length": 1, + "max_length": 40, + "documentation": "\n

The label of the caption shown in the player when choosing\n a language. We recommend that you put the caption language name here, \n in the language of the captions.

\n " + } + }, + "documentation": "\n

A source file for the input sidecar captions used during the transcoding\n process.

\n " + }, + "max_length": 20, + "documentation": "\n

Source files for the input sidecar captions used during the transcoding\n process. To omit all sidecar captions, leave CaptionSources blank.

\n " + }, + "CaptionFormats": { + "shape_name": "CaptionFormats", + "type": "list", + "members": { + "shape_name": "CaptionFormat", + "type": "structure", + "members": { + "Format": { + "shape_name": "CaptionFormatFormat", + "type": "string", + "pattern": "(^mov-text$)|(^cea-608$)|(^cea-708$)|(^srt$)|(^scc$)|(^webvtt$)|(^dfxp$)", + "documentation": "\n

The format you specify determines whether Elastic Transcoder generates an\n embedded or sidecar caption for this output.

\n \n " + }, + "Pattern": { + "shape_name": "CaptionFormatPattern", + "type": "string", + "pattern": "(^$)|(^.*\\{language\\}.*$)", + "documentation": "\n

The prefix for caption filenames, in the form description-{language}, where:

\n \n

If you don't include {language} in the file name pattern, Elastic Transcoder automatically\n appends \"{language}\" to the value that you specify for the description. In addition,\n Elastic Transcoder automatically appends the count to the end of the segment files.

\n

For example, suppose you're transcoding into srt format. When you enter\n \"Sydney-{language}-sunrise\", and the language of the captions is English\n (en), the name of the first caption file will be Sydney-en-sunrise00000.srt.

\n " + } + }, + "documentation": "\n

The file format of the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.

\n " + }, + "max_length": 4, + "documentation": "\n

The array of file formats for the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.

\n " + } + }, + "documentation": "\n

You can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another.\n All captions must be in UTF-8. Elastic Transcoder supports two types of captions:

\n \n

If you want ttml or smpte-tt compatible captions, specify dfxp as your output format.

\n

Elastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures\n as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does\n not preserve text formatting (for example, italics) during the transcoding process.

\n

To remove captions or leave the captions empty, set Captions to null. To pass through\n existing captions unchanged, set the MergePolicy to MergeRetain,\n and pass in a null CaptionSources array.

\n

For more information on embedded files, see the Subtitles Wikipedia page.

\n

For more information on sidecar files, see the Extensible Metadata Platform and Sidecar file\n Wikipedia pages.

\n " } }, "documentation": "\n

Outputs recommended instead. If you specified one output for a job,\n information about that output. If you specified multiple outputs for a job, the\n Output object lists information about the first output. This duplicates\n the information that is listed for the first output in the Outputs\n object.

\n " @@ -4809,8 +5596,8 @@ "type": "string", "pattern": "^\\d{13}-\\w{6}$", "documentation": "\n

The identifier of the pipeline to read.

\n ", - "location": "uri", - "required": true + "required": true, + "location": "uri" } }, "documentation": "\n

The ReadPipelineRequest structure.

\n " @@ -5065,8 +5852,8 @@ "type": "string", "pattern": "^\\d{13}-\\w{6}$", "documentation": "\n

The identifier of the preset for which you want to get detailed information.

\n ", - "location": "uri", - "required": true + "required": true, + "location": "uri" } }, "documentation": "\n

The ReadPresetRequest structure.

\n " @@ -5146,7 +5933,7 @@ "shape_name": "AudioCodecProfile", "type": "string", "pattern": "(^auto$)|(^AAC-LC$)|(^HE-AAC$)|(^HE-AACv2$)", - "documentation": "\n

If you specified AAC for Audio:Codec, choose the AAC profile for the output file.\n Elastic Transcoder supports the following profiles:

\n \n

If you created any presets before AAC profiles were added, Elastic Transcoder automatically updated\n your presets to use AAC-LC. You can change the value as required.

\n " + "documentation": "\n

You can only choose an audio profile when you specify AAC for the value of Audio:Codec.

\n

Specify the AAC profile for the output file. Elastic Transcoder supports the following profiles:

\n \n

If you created any presets before AAC profiles were added, Elastic Transcoder automatically updated\n your presets to use AAC-LC. You can change the value as required.

\n " } }, "documentation": "\n

If you specified AAC for Audio:Codec, this is the AAC \n compression profile to use. Valid values include:

\n

auto, AAC-LC, HE-AAC, HE-AACv2

\n

If you specify auto, Elastic Transcoder chooses a profile based on the bit rate of the output file.

\n " @@ -5273,13 +6060,13 @@ "MaxWidth": { "shape_name": "PixelsOrPercent", "type": "string", - "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{2,4}?px$)", + "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{1,4}?px$)", "documentation": "\n

The maximum width of the watermark in one of the following formats:

\n " }, "MaxHeight": { "shape_name": "PixelsOrPercent", "type": "string", - "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{2,4}?px$)", + "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{1,4}?px$)", "documentation": "\n

The maximum height of the watermark in one of the following formats:

If you specify the value in pixels, it must be less than or equal to the value of\n MaxHeight.

\n " }, "SizingPolicy": { @@ -5297,7 +6084,7 @@ "HorizontalOffset": { "shape_name": "PixelsOrPercent", "type": "string", - "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{2,4}?px$)", + "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{1,4}?px$)", "documentation": "\n

The amount by which you want the horizontal position of the watermark to be offset from\n the position specified by HorizontalAlign:

For example, if you specify Left for HorizontalAlign and 5px for\n HorizontalOffset, the left side of the watermark appears 5 pixels from\n the left border of the output video.

\n

HorizontalOffset is only valid when the value of\n HorizontalAlign is Left or Right. If you\n specify an offset that causes the watermark to extend beyond the left or right border\n and Elastic Transcoder has not added black bars, the watermark is cropped. If Elastic\n Transcoder has added black bars, the watermark extends into the black bars. If the\n watermark extends beyond the black bars, it is cropped.

\n

Use the value of Target to specify whether you want to include the black\n bars that are added by Elastic Transcoder, if any, in the offset calculation.

\n " }, "VerticalAlign": { @@ -5309,7 +6096,7 @@ "VerticalOffset": { "shape_name": "PixelsOrPercent", "type": "string", - "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{2,4}?px$)", + "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{1,4}?px$)", "documentation": "\n VerticalOffset\n

The amount by which you want the vertical position of the watermark to be offset from the\n position specified by VerticalAlign:

For example, if you specify Top for VerticalAlign and\n 5px for VerticalOffset, the top of the watermark appears 5\n pixels from the top border of the output video.

\n

VerticalOffset is only valid when the value of VerticalAlign is Top or\n Bottom.

\n

If you specify an offset that causes the watermark to extend beyond the top or bottom\n border and Elastic Transcoder has not added black bars, the watermark is cropped. If\n Elastic Transcoder has added black bars, the watermark extends into the black bars. If\n the watermark extends beyond the black bars, it is cropped.

\n\n

Use the value of Target to specify whether you want Elastic Transcoder to\n include the black bars that are added by Elastic Transcoder, if any, in the offset\n calculation.

\n " }, "Opacity": { @@ -5448,19 +6235,22 @@ "shape_name": "Role", "type": "string", "pattern": "^arn:aws:iam::\\w{12}:role/.+$", - "documentation": "\n

The IAM Amazon Resource Name (ARN) for the role that you want Elastic Transcoder to\n test.

\n " + "documentation": "\n

The IAM Amazon Resource Name (ARN) for the role that you want Elastic Transcoder to\n test.

\n ", + "required": true }, "InputBucket": { "shape_name": "BucketName", "type": "string", "pattern": "^(\\w|\\.|-){1,255}$", - "documentation": "\n

The Amazon S3 bucket that contains media files to be transcoded. The action attempts to read\n from this bucket.

\n " + "documentation": "\n

The Amazon S3 bucket that contains media files to be transcoded. The action attempts to read\n from this bucket.

\n ", + "required": true }, "OutputBucket": { "shape_name": "BucketName", "type": "string", "pattern": "^(\\w|\\.|-){1,255}$", - "documentation": "\n

The Amazon S3 bucket that Elastic Transcoder will write transcoded media files to. The action attempts to\n read from this bucket.

\n " + "documentation": "\n

The Amazon S3 bucket that Elastic Transcoder will write transcoded media files to. The action attempts to\n read from this bucket.

\n ", + "required": true }, "Topics": { "shape_name": "SnsTopics", @@ -5472,7 +6262,8 @@ "documentation": null }, "max_length": 30, - "documentation": "\n

The ARNs of one or more Amazon Simple Notification Service (Amazon SNS) topics that you want the action to send a test\n notification to.

\n " + "documentation": "\n

The ARNs of one or more Amazon Simple Notification Service (Amazon SNS) topics that you want the action to send a test\n notification to.

\n ", + "required": true } }, "documentation": "\n

The TestRoleRequest structure.

\n " @@ -5977,8 +6768,8 @@ "type": "string", "pattern": "^\\d{13}-\\w{6}$", "documentation": "\n

The identifier of the pipeline for which you want to change notification settings.

\n ", - "location": "uri", - "required": true + "required": true, + "location": "uri" }, "Notifications": { "shape_name": "Notifications", @@ -6009,7 +6800,8 @@ "documentation": "\n

The Amazon SNS topic that you want to notify when Elastic Transcoder encounters an error condition.

\n " } }, - "documentation": "\n

The topic ARN for the Amazon Simple Notification Service (Amazon SNS) topic that you want to notify to report job\n status.

\n To receive notifications, you must also subscribe to the new topic in the Amazon SNS\n console.\n \n " + "documentation": "\n

The topic ARN for the Amazon Simple Notification Service (Amazon SNS) topic that you want to notify to report job\n status.

\n To receive notifications, you must also subscribe to the new topic in the Amazon SNS\n console.\n \n ", + "required": true } }, "documentation": "\n

The UpdatePipelineNotificationsRequest structure.

\n " @@ -6270,14 +7062,15 @@ "type": "string", "pattern": "^\\d{13}-\\w{6}$", "documentation": "\n

The identifier of the pipeline to update.

\n ", - "location": "uri", - "required": true + "required": true, + "location": "uri" }, "Status": { "shape_name": "PipelineStatus", "type": "string", "pattern": "(^Active$)|(^Paused$)", - "documentation": "\n

The desired status of the pipeline:

\n \n " + "documentation": "\n

The desired status of the pipeline:

\n \n ", + "required": true } }, "documentation": "\n

The UpdatePipelineStatusRequest structure.

\n " diff --git a/botocore/data/aws/emr/2009-03-31.json b/botocore/data/aws/emr/2009-03-31.json index 366dde963c..2c3acf9ace 100644 --- a/botocore/data/aws/emr/2009-03-31.json +++ b/botocore/data/aws/emr/2009-03-31.json @@ -8,6 +8,7 @@ "service_abbreviation": "Amazon EMR", "timestamp_format": "unixTimestamp", "endpoint_prefix": "elasticmapreduce", + "xmlnamespace": "http://elasticmapreduce.amazonaws.com/doc/2009-03-31", "documentation": "\n

This is the Amazon Elastic MapReduce API Reference. This guide provides descriptions and\n samples of the Amazon Elastic MapReduce APIs.

\n\n

Amazon Elastic MapReduce (Amazon EMR) is a web service that makes it easy to process large amounts of\n data efficiently. Amazon EMR uses Hadoop processing combined with several AWS\n products to do tasks such as web indexing, data mining, log file analysis, machine\n learning, scientific simulation, and data warehousing.

\n\n ", "operations": { "AddInstanceGroups": { @@ -142,7 +143,7 @@ "pattern": "[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*", "min_length": 0, "max_length": 256, - "documentation": "\n

A string that uniquely identifies the job flow. This identifier is returned by\n RunJobFlow and can also be obtained from DescribeJobFlows.

\n ", + "documentation": "\n

A string that uniquely identifies the job flow. This identifier is returned by\n RunJobFlow and can also be obtained from ListClusters.

\n ", "required": true }, "Steps": { @@ -241,7 +242,7 @@ }, "documentation": "\n

Specification of a job flow step.

\n " }, - "documentation": "\n

A list of StepConfig to be executed by the job flow.

\n ", + "documentation": "\n

A list of StepConfig to be executed by the job flow.

\n ", "required": true } }, @@ -265,7 +266,7 @@ "documentation": "\n

The identifiers of the list of steps added to the job flow.

\n " } }, - "documentation": "\n

The output for the AddJobFlowSteps operation.

\n " + "documentation": "\n

The output for the AddJobFlowSteps operation.

\n " }, "errors": [ { @@ -286,7 +287,8 @@ "ResourceId": { "shape_name": "ResourceId", "type": "string", - "documentation": "\n

The Amazon EMR resource identifier to which tags will be added. This value must be a cluster identifier.

\n " + "documentation": "\n

The Amazon EMR resource identifier to which tags will be added. This value must be a cluster identifier.

\n ", + "required": true }, "Tags": { "shape_name": "TagList", @@ -298,26 +300,27 @@ "Key": { "shape_name": "String", "type": "string", - "documentation": "\n

A user-defined key, which is the minimum required information for a valid tag.\n For more information, see Tagging Amazon EMR Resources. \n

\n " + "documentation": "\n

A user-defined key, which is the minimum required information for a valid tag. \n For more information, see Tagging Amazon EMR Resources. \n

\n " }, "Value": { "shape_name": "String", "type": "string", - "documentation": "\n

A user-defined value, which is optional in a tag.\n For more information, see Tagging Amazon EMR Resources. \n

\n " + "documentation": "\n

A user-defined value, which is optional in a tag. \n For more information, see Tagging Amazon EMR Resources. \n

\n " } }, - "documentation": "\n

A key/value pair that contains user-defined metadata that you can associate with an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. \n For more information, see Tagging Amazon EMR Resources. \n

\n " + "documentation": "\n

A key/value pair containing user-defined metadata that you can associate with an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. \n For more information, see Tagging Amazon EMR Resources. \n

\n " }, - "documentation": "\n

A list of tags to associate with a cluster and propagate to Amazon EC2 instances. Tags are user-defined key/value pairs that consist of a required key string with a maximum of 128 characters, and an optional value string with a maximum of 256 characters.

\n " + "documentation": "\n

A list of tags to associate with a cluster and propagate to Amazon EC2 instances. Tags are user-defined key/value pairs that consist of a required key string with a maximum of 128 characters, and an optional value string with a maximum of 256 characters.

\n ", + "required": true } }, - "documentation": "\n

This input identifies a cluster and a list of tags to attach.\n

\n " + "documentation": "\n

This input identifies a cluster and a list of tags to attach. \n

\n " }, "output": { "shape_name": "AddTagsOutput", "type": "structure", "members": {}, - "documentation": "\n

This output indicates the result of adding tags to a resource. \n

\n " + "documentation": "\n

This output indicates the result of adding tags to a resource. \n

\n " }, "errors": [ { @@ -327,10 +330,10 @@ "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\n

The message associated with the exception.

\n \n " + "documentation": "\n

The message associated with the exception.

\n\n " } }, - "documentation": "\n

This exception occurs when there is an internal failure in the EMR service.

\n \n " + "documentation": "\n

This exception occurs when there is an internal failure in the EMR service.

\n\n " }, { "shape_name": "InvalidRequestException", @@ -341,18 +344,18 @@ "type": "string", "min_length": 1, "max_length": 256, - "documentation": "\n

The error code associated with the exception.

\n \n " + "documentation": "\n

The error code associated with the exception.

\n\n " }, "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\n

The message associated with the exception.

\n \n " + "documentation": "\n

The message associated with the exception.

\n\n " } }, - "documentation": "\n

This exception occurs when there is something wrong with user input.

\n \n " + "documentation": "\n

This exception occurs when there is something wrong with user input.

\n\n " } ], - "documentation": "\n

Adds tags to an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. \n For more information, see Tagging Amazon EMR Resources. \n

\n " + "documentation": "\n

Adds tags to an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. \n For more information, see Tagging Amazon EMR Resources. \n

\n \n POST / HTTP/1.1 \nContent-Type: application/x-amz-json-1.1 \nX-Amz-Target: ElasticMapReduce.AddTags \nAUTHPARAMS \n{ \n \"ResourceId\": \"j-3U7TSX5GZFD8Y\", \n \"Tags\": [{ \n \"Key\": \"stack\", \n \"Value\": \"Production\" \n }, \n { \n \"Key\": \"hbase\" \n }] \n} \n\n\n HTTP/1.1 200 OK \nx-amzn-RequestId: 9da5a349-ed9e-11e2-90db-69a5154aeb8d \nContent-Type: application/x-amz-json-1.1 \nContent-Length: 71 \nDate: Mon, 15 Jul 2013 22:33:47 GMT \n{ \n} \n \n \n " }, "DescribeCluster": { "name": "DescribeCluster", @@ -363,7 +366,8 @@ "ClusterId": { "shape_name": "ClusterId", "type": "string", - "documentation": "\n

The identifier of the cluster to describe.

\n " + "documentation": "\n

The identifier of the cluster to describe.

\n ", + "required": true } }, "documentation": "\n

This input determines which cluster to describe.

\n " @@ -480,7 +484,7 @@ "documentation": "\n

The IAM role that was specified when the job flow was launched. The EC2 instances of the job flow assume this role.

\n " } }, - "documentation": "\n

Provides information about the EC2 instances in a cluster grouped by category. For example, EC2 Key Name, Subnet Id, Instance Profile, and so on.

\n " + "documentation": "\n

Provides information about the EC2 instances in a cluster grouped by category. For example, key name, subnet ID, IAM instance profile, and so on.

\n " }, "LogUri": { "shape_name": "String", @@ -555,7 +559,7 @@ "documentation": "\n

This option is for advanced users only. This is meta information about third-party applications that third-party vendors use for testing purposes.

" } }, - "documentation": "\n

An application is any Amazon or third-party software that you can add to the cluster. This structure contains a list of strings that indicates the software to use with the cluster and accepts a user argument list. Amazon EMR accepts and forwards the argument list to the corresponding installation\n script as bootstrap action argument. For more information, see Launch a Job Flow on the MapR Distribution for Hadoop. Currently supported values are:

\n \n " + "documentation": "\n

An application is any Amazon or third-party software that you can add to the cluster. This structure contains a list of strings that indicates the software to use with the cluster and accepts a user argument list. Amazon EMR accepts and forwards the argument list to the corresponding installation\n script as bootstrap action argument. For more information, see Launch a Job Flow on the MapR Distribution for Hadoop. Currently supported values are:

\n \n " }, "documentation": "\n

The applications installed on this cluster.

\n " }, @@ -569,17 +573,22 @@ "Key": { "shape_name": "String", "type": "string", - "documentation": "\n

A user-defined key, which is the minimum required information for a valid tag.\n For more information, see Tagging Amazon EMR Resources. \n

\n " + "documentation": "\n

A user-defined key, which is the minimum required information for a valid tag. \n For more information, see Tagging Amazon EMR Resources. \n

\n " }, "Value": { "shape_name": "String", "type": "string", - "documentation": "\n

A user-defined value, which is optional in a tag.\n For more information, see Tagging Amazon EMR Resources. \n

\n " + "documentation": "\n

A user-defined value, which is optional in a tag. \n For more information, see Tagging Amazon EMR Resources. \n

\n " } }, - "documentation": "\n

A key/value pair that contains user-defined metadata that you can associate with an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. \n For more information, see Tagging Amazon EMR Resources. \n

\n " + "documentation": "\n

A key/value pair containing user-defined metadata that you can associate with an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. \n For more information, see Tagging Amazon EMR Resources. \n

\n " }, - "documentation": "\n

A list of tags associated with cluster.

" + "documentation": "\n

A list of tags associated with a cluster.

" + }, + "ServiceRole": { + "shape_name": "String", + "type": "string", + "documentation": "\n

The IAM role that was specified when the job flow was launched. Amazon ElasticMapReduce will assume this role to work with AWS resources on your behalf.

\n " } }, "documentation": "\n

This output contains the details for the requested cluster.

\n " @@ -595,10 +604,10 @@ "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\n

The message associated with the exception.

\n \n " + "documentation": "\n

The message associated with the exception.

\n\n " } }, - "documentation": "\n

This exception occurs when there is an internal failure in the EMR service.

\n \n " + "documentation": "\n

This exception occurs when there is an internal failure in the EMR service.

\n\n " }, { "shape_name": "InvalidRequestException", @@ -609,15 +618,15 @@ "type": "string", "min_length": 1, "max_length": 256, - "documentation": "\n

The error code associated with the exception.

\n \n " + "documentation": "\n

The error code associated with the exception.

\n\n " }, "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\n

The message associated with the exception.

\n \n " + "documentation": "\n

The message associated with the exception.

\n\n " } }, - "documentation": "\n

This exception occurs when there is something wrong with user input.

\n \n " + "documentation": "\n

This exception occurs when there is something wrong with user input.

\n\n " } ], "documentation": "\n

Provides cluster-level details including status, hardware and software configuration, VPC settings, and so on. For information about the cluster steps, see ListSteps.

\n \n " @@ -1236,6 +1245,14 @@ "min_length": 0, "max_length": 10280, "documentation": "\n

The IAM role that was specified when the job flow was launched. The EC2 instances of the job flow assume this role.

\n " + }, + "ServiceRole": { + "shape_name": "XmlString", + "type": "string", + "pattern": "[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*", + "min_length": 0, + "max_length": 10280, + "documentation": "\n

The IAM role that was specified when the job flow was launched. Amazon ElasticMapReduce will assume this role to work with AWS resources on your behalf.

\n " } }, "documentation": "\n

A description of a job flow.

\n " @@ -1253,7 +1270,7 @@ "documentation": "\n

Indicates that an error occurred while processing the request and that the request was not\n completed.

\n " } ], - "documentation": "\n

DescribeJobFlows returns a list of job flows that match all of the supplied parameters.\n The parameters can include a list of job flow IDs, job flow states, and restrictions on job\n flow creation date and time.

\n

Regardless of supplied parameters, only job flows created within the last two months are\n returned.

\n

If no parameters are supplied, then job flows matching either of the following criteria\n are returned:

\n \n

Amazon Elastic MapReduce can return a maximum of 512 job flow descriptions.

\n \n POST / HTTP/1.1\nContent-Type: application/x-amz-json-1.1\nX-Amz-Target: ElasticMapReduce.DescribeJobFlows\nContent-Length: 62\nUser-Agent: aws-sdk-ruby/1.9.2 ruby/1.9.3 i386-mingw32\nHost: us-east-1.elasticmapreduce.amazonaws.com\nX-Amz-Date: 20130715T220330Z\nX-Amz-Content-Sha256: fce83af973f96f173512aca2845c56862b946feb1de0600326f1365b658a0e39\nAuthorization: AWS4-HMAC-SHA256 Credential=AKIAIOSFODNN7EXAMPLE/20130715/us-east-1/elasticmapreduce/aws4_request, SignedHeaders=content-length;content-type;host;user-agent;x-amz-content-sha256;x-amz-date;x-amz-target, Signature=29F98a6f44e05ad54fe1e8b3d1a7101ab08dc3ad348995f89c533693cee2bb3b\nAccept: */*\n\n{\n \"JobFlowIds\": [\"j-ZKIY4CKQRX72\"],\n \"DescriptionType\": \"EXTENDED\"\n}\n\n\n HTTP/1.1 200 OK\nx-amzn-RequestId: 634d4142-ed9a-11e2-bbba-b56d7d016ec4\nContent-Type: application/x-amz-json-1.1\nContent-Length: 1624\nDate: Mon, 15 Jul 2013 22:03:31 GMT\n\n{\"JobFlows\": [{\n \"AmiVersion\": \"2.3.6\",\n \"BootstrapActions\": [],\n \"ExecutionStatusDetail\": {\n \"CreationDateTime\": 1.373923429E9,\n \"EndDateTime\": 1.373923995E9,\n \"LastStateChangeReason\": \"Steps completed\",\n \"ReadyDateTime\": 1.373923754E9,\n \"StartDateTime\": 1.373923754E9,\n \"State\": \"COMPLETED\"\n },\n \"Instances\": {\n \"HadoopVersion\": \"1.0.3\",\n \"InstanceCount\": 1,\n \"InstanceGroups\": [{\n \"CreationDateTime\": 1.373923429E9,\n \"EndDateTime\": 1.373923995E9,\n \"InstanceGroupId\": \"ig-3SRUWV3E0NB7K\",\n \"InstanceRequestCount\": 1,\n \"InstanceRole\": \"MASTER\",\n \"InstanceRunningCount\": 0,\n \"InstanceType\": \"m1.small\",\n \"LastStateChangeReason\": \"Job flow terminated\",\n \"Market\": \"ON_DEMAND\",\n \"Name\": \"Master InstanceGroup\",\n \"ReadyDateTime\": 1.37392375E9,\n \"StartDateTime\": 1.373923646E9,\n \"State\": \"ENDED\"\n }],\n \"KeepJobFlowAliveWhenNoSteps\": false,\n \"MasterInstanceId\": \"i-8c4fbbef\",\n \"MasterInstanceType\": \"m1.small\",\n \"MasterPublicDnsName\": 
\"ec2-107-20-46-140.compute-1.amazonaws.com\",\n \"NormalizedInstanceHours\": 1,\n \"Placement\": {\"AvailabilityZone\": \"us-east-1a\"},\n \"TerminationProtected\": false\n },\n \"JobFlowId\": \"j-ZKIY4CKQRX72\",\n \"Name\": \"Development Job Flow\",\n \"Steps\": [{\n \"ExecutionStatusDetail\": {\n \"CreationDateTime\": 1.373923429E9,\n \"EndDateTime\": 1.373923914E9,\n \"StartDateTime\": 1.373923754E9,\n \"State\": \"COMPLETED\"\n },\n \"StepConfig\": {\n \"ActionOnFailure\": \"CANCEL_AND_WAIT\",\n \"HadoopJarStep\": {\n \"Args\": [\n \"-input\",\n \"s3://elasticmapreduce/samples/wordcount/input\",\n \"-output\",\n \"s3://examples-bucket/example-output\",\n \"-mapper\",\n \"s3://elasticmapreduce/samples/wordcount/wordSplitter.py\",\n \"-reducer\",\n \"aggregate\"\n ],\n \"Jar\": \"/home/hadoop/contrib/streaming/hadoop-streaming.jar\",\n \"Properties\": []\n },\n \"Name\": \"Example Streaming Step\"\n }\n }],\n \"SupportedProducts\": [],\n \"VisibleToAllUsers\": false\n}]}\n\n \n " + "documentation": "\n

This API is deprecated and will eventually be removed. We recommend you use ListClusters,\n DescribeCluster, ListSteps, ListInstanceGroups and ListBootstrapActions\n instead.

\n

DescribeJobFlows returns a list of job flows that match all of the supplied parameters.\n The parameters can include a list of job flow IDs, job flow states, and restrictions on job\n flow creation date and time.

\n

Regardless of supplied parameters, only job flows created within the last two months are\n returned.

\n

If no parameters are supplied, then job flows matching either of the following criteria\n are returned:

\n \n

Amazon Elastic MapReduce can return a maximum of 512 job flow descriptions.

\n \n POST / HTTP/1.1\nContent-Type: application/x-amz-json-1.1\nX-Amz-Target: ElasticMapReduce.DescribeJobFlows\nContent-Length: 62\nUser-Agent: aws-sdk-ruby/1.9.2 ruby/1.9.3 i386-mingw32\nHost: us-east-1.elasticmapreduce.amazonaws.com\nX-Amz-Date: 20130715T220330Z\nX-Amz-Content-Sha256: fce83af973f96f173512aca2845c56862b946feb1de0600326f1365b658a0e39\nAuthorization: AWS4-HMAC-SHA256 Credential=AKIAIOSFODNN7EXAMPLE/20130715/us-east-1/elasticmapreduce/aws4_request, SignedHeaders=content-length;content-type;host;user-agent;x-amz-content-sha256;x-amz-date;x-amz-target, Signature=29F98a6f44e05ad54fe1e8b3d1a7101ab08dc3ad348995f89c533693cee2bb3b\nAccept: */*\n\n{\n \"JobFlowIds\": [\"j-ZKIY4CKQRX72\"],\n \"DescriptionType\": \"EXTENDED\"\n}\n\n\n HTTP/1.1 200 OK\nx-amzn-RequestId: 634d4142-ed9a-11e2-bbba-b56d7d016ec4\nContent-Type: application/x-amz-json-1.1\nContent-Length: 1624\nDate: Mon, 15 Jul 2013 22:03:31 GMT\n\n{\"JobFlows\": [{\n \"AmiVersion\": \"2.3.6\",\n \"BootstrapActions\": [],\n \"ExecutionStatusDetail\": {\n \"CreationDateTime\": 1.373923429E9,\n \"EndDateTime\": 1.373923995E9,\n \"LastStateChangeReason\": \"Steps completed\",\n \"ReadyDateTime\": 1.373923754E9,\n \"StartDateTime\": 1.373923754E9,\n \"State\": \"COMPLETED\"\n },\n \"Instances\": {\n \"HadoopVersion\": \"1.0.3\",\n \"InstanceCount\": 1,\n \"InstanceGroups\": [{\n \"CreationDateTime\": 1.373923429E9,\n \"EndDateTime\": 1.373923995E9,\n \"InstanceGroupId\": \"ig-3SRUWV3E0NB7K\",\n \"InstanceRequestCount\": 1,\n \"InstanceRole\": \"MASTER\",\n \"InstanceRunningCount\": 0,\n \"InstanceType\": \"m1.small\",\n \"LastStateChangeReason\": \"Job flow terminated\",\n \"Market\": \"ON_DEMAND\",\n \"Name\": \"Master InstanceGroup\",\n \"ReadyDateTime\": 1.37392375E9,\n \"StartDateTime\": 1.373923646E9,\n \"State\": \"ENDED\"\n }],\n \"KeepJobFlowAliveWhenNoSteps\": false,\n \"MasterInstanceId\": \"i-8c4fbbef\",\n \"MasterInstanceType\": \"m1.small\",\n \"MasterPublicDnsName\": 
\"ec2-107-20-46-140.compute-1.amazonaws.com\",\n \"NormalizedInstanceHours\": 1,\n \"Placement\": {\"AvailabilityZone\": \"us-east-1a\"},\n \"TerminationProtected\": false\n },\n \"JobFlowId\": \"j-ZKIY4CKQRX72\",\n \"Name\": \"Development Job Flow\",\n \"Steps\": [{\n \"ExecutionStatusDetail\": {\n \"CreationDateTime\": 1.373923429E9,\n \"EndDateTime\": 1.373923914E9,\n \"StartDateTime\": 1.373923754E9,\n \"State\": \"COMPLETED\"\n },\n \"StepConfig\": {\n \"ActionOnFailure\": \"CANCEL_AND_WAIT\",\n \"HadoopJarStep\": {\n \"Args\": [\n \"-input\",\n \"s3://elasticmapreduce/samples/wordcount/input\",\n \"-output\",\n \"s3://examples-bucket/example-output\",\n \"-mapper\",\n \"s3://elasticmapreduce/samples/wordcount/wordSplitter.py\",\n \"-reducer\",\n \"aggregate\"\n ],\n \"Jar\": \"/home/hadoop/contrib/streaming/hadoop-streaming.jar\",\n \"Properties\": []\n },\n \"Name\": \"Example Streaming Step\"\n }\n }],\n \"SupportedProducts\": [],\n \"VisibleToAllUsers\": false\n}]}\n\n \n " }, "DescribeStep": { "name": "DescribeStep", @@ -1264,12 +1281,14 @@ "ClusterId": { "shape_name": "ClusterId", "type": "string", - "documentation": "\n

The identifier of the cluster with steps to describe.

\n " + "documentation": "\n

The identifier of the cluster with steps to describe.

\n ", + "required": true }, "StepId": { "shape_name": "StepId", "type": "string", - "documentation": "\n

The identifier of the step to describe.

\n " + "documentation": "\n

The identifier of the step to describe.

\n ", + "required": true } }, "documentation": "\n

This input determines which step to describe.

\n " @@ -1343,7 +1362,7 @@ "CANCEL_AND_WAIT", "CONTINUE" ], - "documentation": "\n

This specifies what action to take when the cluster step fails. TERMINATE_JOB_FLOW is deprecated, use TERMINATE_CLUSTER instead.\n

\n " + "documentation": "\n

This specifies what action to take when the cluster step fails. Possible values are TERMINATE_CLUSTER, CANCEL_AND_WAIT, and CONTINUE.\n

\n " }, "Status": { "shape_name": "StepStatus", @@ -1394,12 +1413,12 @@ "StartDateTime": { "shape_name": "Date", "type": "timestamp", - "documentation": "\n

The date and time when the cluster step execution started.\n Due to delays in step status reporting, this can display a time which pre-dates a previous call to DescribeStep that indicated the step was not yet running. \n

\n " + "documentation": "\n

The date and time when the cluster step execution started.\n

\n " }, "EndDateTime": { "shape_name": "Date", "type": "timestamp", - "documentation": "\n

The date and time when the cluster step execution completed or failed. \n This can display a time that pre-dates a call to DescribeStep that indicates the step is running, due to delays in step status reporting. \n

\n " + "documentation": "\n

The date and time when the cluster step execution completed or failed.\n

\n " } }, "documentation": "\n

The timeline of the cluster step status over time.\n

\n " @@ -1421,10 +1440,10 @@ "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\n

The message associated with the exception.

\n \n " + "documentation": "\n

The message associated with the exception.

\n\n " } }, - "documentation": "\n

This exception occurs when there is an internal failure in the EMR service.

\n \n " + "documentation": "\n

This exception occurs when there is an internal failure in the EMR service.

\n\n " }, { "shape_name": "InvalidRequestException", @@ -1435,15 +1454,15 @@ "type": "string", "min_length": 1, "max_length": 256, - "documentation": "\n

The error code associated with the exception.

\n \n " + "documentation": "\n

The error code associated with the exception.

\n\n " }, "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\n

The message associated with the exception.

\n \n " + "documentation": "\n

The message associated with the exception.

\n\n " } }, - "documentation": "\n

This exception occurs when there is something wrong with user input.

\n \n " + "documentation": "\n

This exception occurs when there is something wrong with user input.

\n\n " } ], "documentation": "\n

Provides more detail about the cluster step.

\n " @@ -1457,12 +1476,13 @@ "ClusterId": { "shape_name": "ClusterId", "type": "string", - "documentation": "\n

The cluster identifier for the bootstrap actions to list.

\n " + "documentation": "\n

The cluster identifier for the bootstrap actions to list\n .

\n ", + "required": true }, "Marker": { "shape_name": "Marker", "type": "string", - "documentation": "\n

The pagination token is a random string indicating whether there are more results to fetch. Provide the pagination token from earlier API calls to retrieve the next page of results. When the value is null, all results have been returned.

\n " + "documentation": "\n

The pagination token that indicates the next set of results to retrieve\n .

\n " } }, "documentation": "\n

This input determines which bootstrap actions to retrieve.

\n " @@ -1501,15 +1521,15 @@ }, "documentation": "\n

An entity describing an executable that runs on a cluster.

\n " }, - "documentation": "\n

The bootstrap actions associated with the cluster.

\n " + "documentation": "\n

The bootstrap actions associated with the cluster\n .

\n " }, "Marker": { "shape_name": "Marker", "type": "string", - "documentation": "\n

The pagination token is a random string indicating whether there are more results to fetch. Use the pagination token in later API calls to retrieve the next page of results. When the value is null, all results have been returned.

\n " + "documentation": "\n

The pagination token that indicates the next set of results to retrieve\n .

\n " } }, - "documentation": "\n

This output contains the bootstrap actions detail.

\n " + "documentation": "\n

This output contains the bootstrap actions detail\n .

\n " }, "errors": [ { @@ -1519,10 +1539,10 @@ "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\n

The message associated with the exception.

\n \n " + "documentation": "\n

The message associated with the exception.

\n\n " } }, - "documentation": "\n

This exception occurs when there is an internal failure in the EMR service.

\n \n " + "documentation": "\n

This exception occurs when there is an internal failure in the EMR service.

\n\n " }, { "shape_name": "InvalidRequestException", @@ -1533,15 +1553,15 @@ "type": "string", "min_length": 1, "max_length": 256, - "documentation": "\n

The error code associated with the exception.

\n \n " + "documentation": "\n

The error code associated with the exception.

\n\n " }, "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\n

The message associated with the exception.

\n \n " + "documentation": "\n

The message associated with the exception.

\n\n " } }, - "documentation": "\n

This exception occurs when there is something wrong with user input.

\n \n " + "documentation": "\n

This exception occurs when there is something wrong with user input.

\n\n " } ], "documentation": "\n

Provides information about the bootstrap actions associated with a cluster.

\n \n ", @@ -1561,12 +1581,12 @@ "CreatedAfter": { "shape_name": "Date", "type": "timestamp", - "documentation": "\n

The creation date and time beginning value filter for listing clusters.

\n " + "documentation": "\n

The creation date and time beginning value filter for listing clusters\n .

\n " }, "CreatedBefore": { "shape_name": "Date", "type": "timestamp", - "documentation": "\n

The creation date and time end value filter for listing clusters.

\n " + "documentation": "\n

The creation date and time end value filter for listing clusters\n .

\n " }, "ClusterStates": { "shape_name": "ClusterStateList", @@ -1590,7 +1610,7 @@ "Marker": { "shape_name": "Marker", "type": "string", - "documentation": "\n

The pagination token is a random string indicating whether there are more results to fetch. Provide the pagination token from earlier API calls to retrieve the next page of results. When the value is null, all results have been returned.\n

\n " + "documentation": "\n

The pagination token that indicates the next set of results to retrieve.\n

\n " } }, "documentation": "\n

This input determines how the ListClusters action filters the list of clusters that it returns.

\n " @@ -1693,7 +1713,7 @@ "Marker": { "shape_name": "Marker", "type": "string", - "documentation": "\n

The pagination token is a random string indicating whether there are more results to fetch. Use the pagination token in later API calls to retrieve the next page of results. When the value is null, all results have been returned. \n

\n " + "documentation": "\n

The pagination token that indicates the next set of results to retrieve.\n

\n " } }, "documentation": "\n

This contains a ClusterSummaryList with the cluster details; for example, the cluster IDs, names, and status.

\n " @@ -1706,10 +1726,10 @@ "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\n

The message associated with the exception.

\n \n " + "documentation": "\n

The message associated with the exception.

\n\n " } }, - "documentation": "\n

This exception occurs when there is an internal failure in the EMR service.

\n \n " + "documentation": "\n

This exception occurs when there is an internal failure in the EMR service.

\n\n " }, { "shape_name": "InvalidRequestException", @@ -1720,15 +1740,15 @@ "type": "string", "min_length": 1, "max_length": 256, - "documentation": "\n

The error code associated with the exception.

\n \n " + "documentation": "\n

The error code associated with the exception.

\n\n " }, "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\n

The message associated with the exception.

\n \n " + "documentation": "\n

The message associated with the exception.

\n\n " } }, - "documentation": "\n

This exception occurs when there is something wrong with user input.

\n \n " + "documentation": "\n

This exception occurs when there is something wrong with user input.

\n\n " } ], "documentation": "\n

Provides the status of all clusters visible to this AWS account. Allows you to filter the list of clusters based on certain criteria; for example, filtering by cluster creation date and time or by status.\n This call returns a maximum of 50 clusters per call, but returns a marker to track the paging of the cluster list across multiple ListClusters calls. \n

\n \n ", @@ -1748,12 +1768,13 @@ "ClusterId": { "shape_name": "ClusterId", "type": "string", - "documentation": "\n

The identifier of the cluster for which to list the instance groups.

\n " + "documentation": "\n

The identifier of the cluster for which to list the instance groups.

\n ", + "required": true }, "Marker": { "shape_name": "Marker", "type": "string", - "documentation": "\n

The pagination token is a random string indicating whether there are more results to fetch. Provide the pagination token from earlier API calls to retrieve the next page of results. When the value is null, all results have been returned.

\n " + "documentation": "\n

The pagination token that indicates the next set of results to retrieve.

\n " } }, "documentation": "\n

This input determines which instance groups to retrieve.

\n " @@ -1840,7 +1861,7 @@ "SHUTTING_DOWN", "ENDED" ], - "documentation": "\n

The current state of the instance group. The following values are deprecated: ARRESTED, SHUTTING_DOWN, and ENDED. Use SUSPENDED, TERMINATING, and TERMINATED instead, respectively.\n

\n " + "documentation": "\n

The current state of the instance group.

\n " }, "StateChangeReason": { "shape_name": "InstanceGroupStateChangeReason", @@ -1898,7 +1919,7 @@ "Marker": { "shape_name": "Marker", "type": "string", - "documentation": "\n

The pagination token is a random string indicating whether there are more results to fetch. Use the pagination token in later API calls to retrieve the next page of results. When the value is null, all results have been returned.

\n " + "documentation": "\n

The pagination token that indicates the next set of results to retrieve.

\n " } }, "documentation": "\n

This input determines which instance groups to retrieve.

\n " @@ -1911,10 +1932,10 @@ "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\n

The message associated with the exception.

\n \n " + "documentation": "\n

The message associated with the exception.

\n\n " } }, - "documentation": "\n

This exception occurs when there is an internal failure in the EMR service.

\n \n " + "documentation": "\n

This exception occurs when there is an internal failure in the EMR service.

\n\n " }, { "shape_name": "InvalidRequestException", @@ -1925,15 +1946,15 @@ "type": "string", "min_length": 1, "max_length": 256, - "documentation": "\n

The error code associated with the exception.

\n \n " + "documentation": "\n

The error code associated with the exception.

\n\n " }, "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\n

The message associated with the exception.

\n \n " + "documentation": "\n

The message associated with the exception.

\n\n " } }, - "documentation": "\n

This exception occurs when there is something wrong with user input.

\n \n " + "documentation": "\n

This exception occurs when there is something wrong with user input.

\n\n " } ], "documentation": "\n

Provides all available details about the instance groups in a cluster.

\n \n ", @@ -1953,7 +1974,8 @@ "ClusterId": { "shape_name": "ClusterId", "type": "string", - "documentation": "\n

The identifier of the cluster for which to list the instances.

\n " + "documentation": "\n

The identifier of the cluster for which to list the instances.

\n ", + "required": true }, "InstanceGroupId": { "shape_name": "InstanceGroupId", @@ -1978,7 +2000,7 @@ "Marker": { "shape_name": "Marker", "type": "string", - "documentation": "\n

The pagination token is a random string indicating whether there are more results to fetch. Provide the pagination token from earlier API calls to retrieve the next page of results. When the value is null, all results have been returned.

\n " + "documentation": "\n

The pagination token that indicates the next set of results to retrieve.

\n " } }, "documentation": "\n

This input determines which instances to list.

\n " @@ -2097,7 +2119,7 @@ "Marker": { "shape_name": "Marker", "type": "string", - "documentation": "\n

The pagination token is a random string indicating whether there are more results to fetch. Use the pagination token in later API calls to retrieve the next page of results. When the value is null, all results have been returned.

\n " + "documentation": "\n

The pagination token that indicates the next set of results to retrieve.

\n " } }, "documentation": "\n

This output contains the list of instances.

\n " @@ -2110,10 +2132,10 @@ "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\n

The message associated with the exception.

\n \n " + "documentation": "\n

The message associated with the exception.

\n\n " } }, - "documentation": "\n

This exception occurs when there is an internal failure in the EMR service.

\n \n " + "documentation": "\n

This exception occurs when there is an internal failure in the EMR service.

\n\n " }, { "shape_name": "InvalidRequestException", @@ -2124,15 +2146,15 @@ "type": "string", "min_length": 1, "max_length": 256, - "documentation": "\n

The error code associated with the exception.

\n \n " + "documentation": "\n

The error code associated with the exception.

\n\n " }, "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\n

The message associated with the exception.

\n \n " + "documentation": "\n

The message associated with the exception.

\n\n " } }, - "documentation": "\n

This exception occurs when there is something wrong with user input.

\n \n " + "documentation": "\n

This exception occurs when there is something wrong with user input.

\n\n " } ], "documentation": "\n

Provides information about the cluster instances that Amazon EMR provisions on behalf of a user when it creates the cluster. \n For example, this operation indicates when the EC2 instances reach the Ready state, when instances become available to Amazon EMR to use for jobs, and the IP addresses for cluster instances, etc.\n

\n ", @@ -2152,7 +2174,8 @@ "ClusterId": { "shape_name": "ClusterId", "type": "string", - "documentation": "\n

The identifier of the cluster for which to list the steps.

\n " + "documentation": "\n

The identifier of the cluster for which to list the steps.

\n ", + "required": true }, "StepStates": { "shape_name": "StepStateList", @@ -2175,7 +2198,7 @@ "Marker": { "shape_name": "Marker", "type": "string", - "documentation": "\n

The pagination token is a random string indicating whether there are more results to fetch. Provide the pagination token from earlier API calls to retrieve the next page of results. When the value is null, all results have been returned.

\n " + "documentation": "\n

The pagination token that indicates the next set of results to retrieve.

\n " } }, "documentation": "\n

This input determines which steps to list.

\n " @@ -2250,12 +2273,12 @@ "StartDateTime": { "shape_name": "Date", "type": "timestamp", - "documentation": "\n

The date and time when the cluster step execution started.\n Due to delays in step status reporting, this can display a time which pre-dates a previous call to DescribeStep that indicated the step was not yet running. \n

\n " + "documentation": "\n

The date and time when the cluster step execution started.\n

\n " }, "EndDateTime": { "shape_name": "Date", "type": "timestamp", - "documentation": "\n

The date and time when the cluster step execution completed or failed. \n This can display a time that pre-dates a call to DescribeStep that indicates the step is running, due to delays in step status reporting. \n

\n " + "documentation": "\n

The date and time when the cluster step execution completed or failed.\n

\n " } }, "documentation": "\n

The timeline of the cluster step status over time.\n

\n " @@ -2271,7 +2294,7 @@ "Marker": { "shape_name": "Marker", "type": "string", - "documentation": "\n

The pagination token is a random string indicating whether there are more results to fetch. Use the pagination token in later API calls to retrieve the next page of results. When the value is null, all results have been returned.

\n " + "documentation": "\n

The pagination token that indicates the next set of results to retrieve.

\n " } }, "documentation": "\n

This output contains the list of steps.

\n " @@ -2284,10 +2307,10 @@ "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\n

The message associated with the exception.

\n \n " + "documentation": "\n

The message associated with the exception.

\n\n " } }, - "documentation": "\n

This exception occurs when there is an internal failure in the EMR service.

\n \n " + "documentation": "\n

This exception occurs when there is an internal failure in the EMR service.

\n\n " }, { "shape_name": "InvalidRequestException", @@ -2298,15 +2321,15 @@ "type": "string", "min_length": 1, "max_length": 256, - "documentation": "\n

The error code associated with the exception.

\n \n " + "documentation": "\n

The error code associated with the exception.

\n\n " }, "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\n

The message associated with the exception.

\n \n " + "documentation": "\n

The message associated with the exception.

\n\n " } }, - "documentation": "\n

This exception occurs when there is something wrong with user input.

\n \n " + "documentation": "\n

This exception occurs when there is something wrong with user input.

\n\n " } ], "documentation": "\n

Provides a list of steps for the cluster. \n

\n ", @@ -2352,7 +2375,7 @@ "type": "string", "documentation": null }, - "documentation": "\n

The EC2 InstanceIds to terminate. For advanced users only. \n Once you terminate the instances, the instance group will not return to its original requested size.

\n " + "documentation": "\n

The EC2 InstanceIds to terminate. For advanced users only. \n Once you terminate the instances, the instance group will not return to its original requested size.

\n " } }, "documentation": "\n

Modify an instance group size.

\n " @@ -2382,7 +2405,8 @@ "ResourceId": { "shape_name": "ResourceId", "type": "string", - "documentation": "\n

The Amazon EMR resource identifier from which tags will be removed. This value must be a cluster identifier.

\n " + "documentation": "\n

The Amazon EMR resource identifier from which tags will be removed. This value must be a cluster identifier.

\n ", + "required": true }, "TagKeys": { "shape_name": "StringList", @@ -2392,16 +2416,17 @@ "type": "string", "documentation": null }, - "documentation": "\n

A list of tag keys to remove from a resource.

\n " + "documentation": "\n

A list of tag keys to remove from a resource.

\n ", + "required": true } }, - "documentation": "\n

This input identifies a cluster and a list of tags to remove. \n

\n " + "documentation": "\n

This input identifies a cluster and a list of tags to remove. \n

\n " }, "output": { "shape_name": "RemoveTagsOutput", "type": "structure", "members": {}, - "documentation": "\n

This output indicates the result of removing tags from a resource. \n

\n " + "documentation": "\n

This output indicates the result of removing tags from a resource. \n

\n " }, "errors": [ { @@ -2411,10 +2436,10 @@ "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\n

The message associated with the exception.

\n \n " + "documentation": "\n

The message associated with the exception.

\n\n " } }, - "documentation": "\n

This exception occurs when there is an internal failure in the EMR service.

\n \n " + "documentation": "\n

This exception occurs when there is an internal failure in the EMR service.

\n\n " }, { "shape_name": "InvalidRequestException", @@ -2425,18 +2450,18 @@ "type": "string", "min_length": 1, "max_length": 256, - "documentation": "\n

The error code associated with the exception.

\n \n " + "documentation": "\n

The error code associated with the exception.

\n\n " }, "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\n

The message associated with the exception.

\n \n " + "documentation": "\n

The message associated with the exception.

\n\n " } }, - "documentation": "\n

This exception occurs when there is something wrong with user input.

\n \n " + "documentation": "\n

This exception occurs when there is something wrong with user input.

\n\n " } ], - "documentation": "\n

Removes tags from an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. \n For more information, see Tagging Amazon EMR Resources. \n

\n \n " + "documentation": "\n

Removes tags from an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. \n For more information, see Tagging Amazon EMR Resources. \n

\n

The following example removes the stack tag with value Prod from a cluster:

\n \n POST / HTTP/1.1 \nContent-Type: application/x-amz-json-1.1 \nX-Amz-Target: ElasticMapReduce.RemoveTags \nAUTHPARAMS \n{ \n \"ResourceId\": \"j-3U7TSX5GZFD8Y\", \n \"Tags\": [{ \n \"Key\": \"stack\", \n \"Value\": \"Prod\" \n }] \n} \n\n\n HTTP/1.1 200 OK \nx-amzn-RequestId: 9da5a349-ed9e-11e2-90db-69a5154aeb8d \nContent-Type: application/x-amz-json-1.1 \nContent-Length: 71 \nDate: Mon, 15 Jul 2013 22:33:47 GMT \n{ \n} \n \n

The following example removes the stack and hbase tags from a cluster:

\n POST / HTTP/1.1 \nContent-Type: application/x-amz-json-1.1 \nX-Amz-Target: ElasticMapReduce.RemoveTags \nAUTHPARAMS \n{ \n \"ResourceId\": \"j-3U7TSX5GZFD8Y\", \n \"Tags\": [{ \n \"Key\": \"stack\" \n }, \n { \n \"Key\": \"hbase\" \n }] \n} \n\n\n HTTP/1.1 200 OK \nx-amzn-RequestId: 9da5a349-ed9e-11e2-90db-69a5154aeb8d \nContent-Type: application/x-amz-json-1.1 \nContent-Length: 71 \nDate: Mon, 15 Jul 2013 22:33:47 GMT \n{ \n} \n \n
\n " }, "RunJobFlow": { "name": "RunJobFlow", @@ -2827,6 +2852,14 @@ "max_length": 10280, "documentation": "\n

An IAM role for the job flow. The EC2 instances of the job flow assume this role. The default role is EMRJobflowDefault. In order to use the default role, you must have already created it using the CLI.

\n " }, + "ServiceRole": { + "shape_name": "XmlString", + "type": "string", + "pattern": "[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*", + "min_length": 0, + "max_length": 10280, + "documentation": "\n

IAM role that Amazon ElasticMapReduce will assume to work with AWS resources on your behalf. You may set this parameter to the name of an existing IAM role.

\n " + }, "Tags": { "shape_name": "TagList", "type": "list", @@ -2837,15 +2870,15 @@ "Key": { "shape_name": "String", "type": "string", - "documentation": "\n

A user-defined key, which is the minimum required information for a valid tag.\n For more information, see Tagging Amazon EMR Resources. \n

\n " + "documentation": "\n

A user-defined key, which is the minimum required information for a valid tag. \n For more information, see Tagging Amazon EMR Resources. \n

\n " }, "Value": { "shape_name": "String", "type": "string", - "documentation": "\n

A user-defined value, which is optional in a tag.\n For more information, see Tagging Amazon EMR Resources. \n

\n " + "documentation": "\n

A user-defined value, which is optional in a tag. \n For more information, see Tagging Amazon EMR Resources. \n

\n " } }, - "documentation": "\n

A key/value pair that contains user-defined metadata that you can associate with an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. \n For more information, see Tagging Amazon EMR Resources. \n

\n " + "documentation": "\n

A key/value pair containing user-defined metadata that you can associate with an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. \n For more information, see Tagging Amazon EMR Resources. \n

\n " }, "documentation": "\n

A list of tags to associate with a cluster and propagate to Amazon EC2 instances.

\n " } @@ -2992,7 +3025,6 @@ "documentation": "\n

\n TerminateJobFlows shuts a list of job flows down. When a job flow is shut down, any step\n not yet completed is canceled and the EC2 instances on which the job flow is running are\n stopped. Any log files not already saved are uploaded to Amazon S3 if a LogUri was\n specified when the job flow was created. \n

\n

\n The call to TerminateJobFlows is asynchronous. Depending on the configuration of the job flow, \n it may take up to 5-20 minutes for the job flow to \n completely terminate and release allocated resources, such as Amazon EC2 instances.\n

\n \n \n POST / HTTP/1.1\nContent-Type: application/x-amz-json-1.1\nX-Amz-Target: ElasticMapReduce.TerminateJobFlows\nContent-Length: 33\nUser-Agent: aws-sdk-ruby/1.9.2 ruby/1.9.3 i386-mingw32\nHost: us-east-1.elasticmapreduce.amazonaws.com\nX-Amz-Date: 20130716T211858Z\nX-Amz-Content-Sha256: ab64713f61e066e80a6083844b9249b6c6362d34a7ae7393047aa46d38b9e315\nAuthorization: AWS4-HMAC-SHA256 Credential=AKIAIOSFODNN7EXAMPLE/20130716/us-east-1/elasticmapreduce/aws4_request, SignedHeaders=content-length;content-type;host;user-agent;x-amz-content-sha256;x-amz-date;x-amz-target, Signature=9791416eaf09f36aa753a324b0de27ff5cc7084b8548cc748487a2bcb3439d58\nAccept: */*\n\n{\"JobFlowIds\": [\"j-3TS0OIYO4NFN\"]}\n\n\n HTTP/1.1 200 OK\nx-amzn-RequestId: 5551a7c9-ee5d-11e2-9542-25296c300ff0\nContent-Type: application/x-amz-json-1.1\nContent-Length: 0\nDate: Tue, 16 Jul 2013 21:18:59 GMT\n \n \n " } }, - "xmlnamespace": "http://elasticmapreduce.amazonaws.com/doc/2009-03-31", "metadata": { "regions": { "us-east-1": null, diff --git a/services/_endpoints.json b/services/_endpoints.json index 5d4b8a2559..de7ef8047f 100644 --- a/services/_endpoints.json +++ b/services/_endpoints.json @@ -163,7 +163,7 @@ ], "dynamodb": [ { - "uri": "http://localhost:8080", + "uri": "http://localhost:8000", "constraints": [ ["region", "equals", "local"] ], diff --git a/services/elastictranscoder.json b/services/elastictranscoder.json index 6acef2ba98..e60d3b3ace 100644 --- a/services/elastictranscoder.json +++ b/services/elastictranscoder.json @@ -23,6 +23,7 @@ "type": "string", "pattern": "^\\d{13}-\\w{6}$", "documentation": "\n

The identifier of the job that you want to cancel.

\n

To get a list of the jobs (including their jobId) that have a status of\n Submitted, use the ListJobsByStatus API action.

\n ", + "required": true, "location": "uri" } }, @@ -96,7 +97,8 @@ "shape_name": "Id", "type": "string", "pattern": "^\\d{13}-\\w{6}$", - "documentation": "\n

The Id of the pipeline that you want Elastic Transcoder to use for\n transcoding. The pipeline determines several settings, including the Amazon S3 bucket\n from which Elastic Transcoder gets the files to transcode and the bucket into which\n Elastic Transcoder puts the transcoded files.

\n " + "documentation": "\n

The Id of the pipeline that you want Elastic Transcoder to use for\n transcoding. The pipeline determines several settings, including the Amazon S3 bucket\n from which Elastic Transcoder gets the files to transcode and the bucket into which\n Elastic Transcoder puts the transcoded files.

\n ", + "required": true }, "Input": { "shape_name": "JobInput", @@ -140,7 +142,8 @@ "documentation": "\n

The container type for the input file. If you want Elastic Transcoder to automatically detect the\n container type of the input file, specify auto. If you want to specify the\n container type for the input file, enter one of the following values:

\n

\n 3gp, aac, asf, avi, \n divx, flv, m4a, mkv, \n mov, mp3, mp4, mpeg, \n mpeg-ps, mpeg-ts, mxf, ogg, \n vob, wav, webm\n

\n " } }, - "documentation": "\n

A section of the request body that provides information about the file that is being\n transcoded.

\n " + "documentation": "\n

A section of the request body that provides information about the file that is being\n transcoded.

\n ", + "required": true }, "Output": { "shape_name": "CreateJobOutput", @@ -288,7 +291,7 @@ "shape_name": "Time", "type": "string", "pattern": "(^\\d{1,5}(\\.\\d{0,3})?$)|(^([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", - "documentation": "\n

The duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.

\n

If you specify a value longer than the duration of the input file , Elastic Transcoder transcodes \n the file and returns a warning message.

\n " + "documentation": "\n

The duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.

\n

If you specify a value longer than the duration of the input file, Elastic Transcoder transcodes \n the file and returns a warning message.

\n " } }, "documentation": "\n

Settings that determine when a clip begins and how long it lasts.

\n " @@ -296,7 +299,85 @@ }, "documentation": "\n

Settings for one clip in a composition. All jobs in a playlist must have the same clip settings.

\n " }, - "documentation": "\n

You can create an output file that contains an excerpt from the input file. This excerpt, called a clip, can come from the beginning, middle, or end of the file. The Composition object contains settings for the clips that make up an output file. For the current release, you can only specify settings for a single clip per output file. The Composition object cannot be null.

\n " + "documentation": "\n

You can create an output file that contains an excerpt from the input file. This \n excerpt, called a clip, can come from the beginning, middle, or end of the file. \n The Composition object contains settings for the clips that make up an output file. \n For the current release, you can only specify settings for a single clip per output \n file. The Composition object cannot be null.

\n " + }, + "Captions": { + "shape_name": "Captions", + "type": "structure", + "members": { + "MergePolicy": { + "shape_name": "CaptionMergePolicy", + "type": "string", + "pattern": "(^MergeOverride$)|(^MergeRetain$)|(^Override$)", + "documentation": "\n

A policy that determines how Elastic Transcoder handles the existence of multiple captions.

\n \n

MergePolicy cannot be null.

\n " + }, + "CaptionSources": { + "shape_name": "CaptionSources", + "type": "list", + "members": { + "shape_name": "CaptionSource", + "type": "structure", + "members": { + "Key": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\n

The name of the sidecar caption file that you want Elastic Transcoder to include in the\n output file.

\n " + }, + "Language": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\n

A string that specifies the language of the caption. Specify this as one of:

\n \n

For more information on ISO language codes and language names, see the List of ISO \n 639-1 codes.

\n " + }, + "TimeOffset": { + "shape_name": "TimeOffset", + "type": "string", + "pattern": "(^[+-]?\\d{1,5}(\\.\\d{0,3})?$)|(^[+-]?([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", + "documentation": "\n

For clip generation or captions that do not start at the same time\n as the associated video file, the TimeOffset tells Elastic Transcoder how much of the video to encode\n before including captions.

\n

Specify the TimeOffset in the form [+-]SS.sss or [+-]HH:mm:SS.ss.

\n " + }, + "Label": { + "shape_name": "Name", + "type": "string", + "min_length": 1, + "max_length": 40, + "documentation": "\n

The label of the caption shown in the player when choosing\n a language. We recommend that you put the caption language name here, \n in the language of the captions.

\n " + } + }, + "documentation": "\n

A source file for the input sidecar captions used during the transcoding\n process.

\n " + }, + "max_length": 20, + "documentation": "\n

Source files for the input sidecar captions used during the transcoding\n process. To omit all sidecar captions, leave CaptionSources blank.

\n " + }, + "CaptionFormats": { + "shape_name": "CaptionFormats", + "type": "list", + "members": { + "shape_name": "CaptionFormat", + "type": "structure", + "members": { + "Format": { + "shape_name": "CaptionFormatFormat", + "type": "string", + "pattern": "(^mov-text$)|(^cea-608$)|(^cea-708$)|(^srt$)|(^scc$)|(^webvtt$)|(^dfxp$)", + "documentation": "\n

The format you specify determines whether Elastic Transcoder generates an\n embedded or sidecar caption for this output.

\n \n " + }, + "Pattern": { + "shape_name": "CaptionFormatPattern", + "type": "string", + "pattern": "(^$)|(^.*\\{language\\}.*$)", + "documentation": "\n

The prefix for caption filenames, in the form description-{language}, where:

\n \n

If you don't include {language} in the file name pattern, Elastic Transcoder automatically\n appends \"{language}\" to the value that you specify for the description. In addition,\n Elastic Transcoder automatically appends the count to the end of the segment files.

\n

For example, suppose you're transcoding into srt format. When you enter\n \"Sydney-{language}-sunrise\", and the language of the captions is English\n (en), the name of the first caption file will be Sydney-en-sunrise00000.srt.

\n " + } + }, + "documentation": "\n

The file format of the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.

\n " + }, + "max_length": 4, + "documentation": "\n

The array of file formats for the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.

\n " + } + }, + "documentation": "\n

You can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another.\n All captions must be in UTF-8. Elastic Transcoder supports two types of captions:

\n \n

If you want ttml or smpte-tt compatible captions, specify dfxp as your output format.

\n

Elastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures\n as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does\n not preserve text formatting (for example, italics) during the transcoding process.

\n

To remove captions or leave the captions empty, set Captions to null. To pass through\n existing captions unchanged, set the MergePolicy to MergeRetain,\n and pass in a null CaptionSources array.

\n

For more information on embedded files, see the Subtitles Wikipedia page.

\n

For more information on sidecar files, see the Extensible Metadata Platform and Sidecar file\n Wikipedia pages.

\n " } }, "documentation": "\n

The CreateJobOutput structure.

\n " @@ -450,7 +531,7 @@ "shape_name": "Time", "type": "string", "pattern": "(^\\d{1,5}(\\.\\d{0,3})?$)|(^([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", - "documentation": "\n

The duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.

\n

If you specify a value longer than the duration of the input file , Elastic Transcoder transcodes \n the file and returns a warning message.

\n " + "documentation": "\n

The duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.

\n

If you specify a value longer than the duration of the input file, Elastic Transcoder transcodes \n the file and returns a warning message.

\n " } }, "documentation": "\n

Settings that determine when a clip begins and how long it lasts.

\n " @@ -458,7 +539,85 @@ }, "documentation": "\n

Settings for one clip in a composition. All jobs in a playlist must have the same clip settings.

\n " }, - "documentation": "\n

You can create an output file that contains an excerpt from the input file. This excerpt, called a clip, can come from the beginning, middle, or end of the file. The Composition object contains settings for the clips that make up an output file. For the current release, you can only specify settings for a single clip per output file. The Composition object cannot be null.

\n " + "documentation": "\n

You can create an output file that contains an excerpt from the input file. This \n excerpt, called a clip, can come from the beginning, middle, or end of the file. \n The Composition object contains settings for the clips that make up an output file. \n For the current release, you can only specify settings for a single clip per output \n file. The Composition object cannot be null.

\n " + }, + "Captions": { + "shape_name": "Captions", + "type": "structure", + "members": { + "MergePolicy": { + "shape_name": "CaptionMergePolicy", + "type": "string", + "pattern": "(^MergeOverride$)|(^MergeRetain$)|(^Override$)", + "documentation": "\n

A policy that determines how Elastic Transcoder handles the existence of multiple captions.

\n \n

MergePolicy cannot be null.

\n " + }, + "CaptionSources": { + "shape_name": "CaptionSources", + "type": "list", + "members": { + "shape_name": "CaptionSource", + "type": "structure", + "members": { + "Key": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\n

The name of the sidecar caption file that you want Elastic Transcoder to include in the\n output file.

\n " + }, + "Language": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\n

A string that specifies the language of the caption. Specify this as one of:

\n \n

For more information on ISO language codes and language names, see the List of ISO \n 639-1 codes.

\n " + }, + "TimeOffset": { + "shape_name": "TimeOffset", + "type": "string", + "pattern": "(^[+-]?\\d{1,5}(\\.\\d{0,3})?$)|(^[+-]?([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", + "documentation": "\n

For clip generation or captions that do not start at the same time\n as the associated video file, the TimeOffset tells Elastic Transcoder how much of the video to encode\n before including captions.

\n

Specify the TimeOffset in the form [+-]SS.sss or [+-]HH:mm:SS.ss.

\n " + }, + "Label": { + "shape_name": "Name", + "type": "string", + "min_length": 1, + "max_length": 40, + "documentation": "\n

The label of the caption shown in the player when choosing\n a language. We recommend that you put the caption language name here, \n in the language of the captions.

\n " + } + }, + "documentation": "\n

A source file for the input sidecar captions used during the transcoding\n process.

\n " + }, + "max_length": 20, + "documentation": "\n

Source files for the input sidecar captions used during the transcoding\n process. To omit all sidecar captions, leave CaptionSources blank.

\n " + }, + "CaptionFormats": { + "shape_name": "CaptionFormats", + "type": "list", + "members": { + "shape_name": "CaptionFormat", + "type": "structure", + "members": { + "Format": { + "shape_name": "CaptionFormatFormat", + "type": "string", + "pattern": "(^mov-text$)|(^cea-608$)|(^cea-708$)|(^srt$)|(^scc$)|(^webvtt$)|(^dfxp$)", + "documentation": "\n

The format you specify determines whether Elastic Transcoder generates an\n embedded or sidecar caption for this output.

\n \n " + }, + "Pattern": { + "shape_name": "CaptionFormatPattern", + "type": "string", + "pattern": "(^$)|(^.*\\{language\\}.*$)", + "documentation": "\n

The prefix for caption filenames, in the form description-{language}, where:

\n \n

If you don't include {language} in the file name pattern, Elastic Transcoder automatically\n appends \"{language}\" to the value that you specify for the description. In addition,\n Elastic Transcoder automatically appends the count to the end of the segment files.

\n

For example, suppose you're transcoding into srt format. When you enter\n \"Sydney-{language}-sunrise\", and the language of the captions is English\n (en), the name of the first caption file will be Sydney-en-sunrise00000.srt.

\n " + } + }, + "documentation": "\n

The file format of the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.

\n " + }, + "max_length": 4, + "documentation": "\n

The array of file formats for the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.

\n " + } + }, + "documentation": "\n

You can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another.\n All captions must be in UTF-8. Elastic Transcoder supports two types of captions:

\n \n

If you want ttml or smpte-tt compatible captions, specify dfxp as your output format.

\n

Elastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures\n as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does\n not preserve text formatting (for example, italics) during the transcoding process.

\n

To remove captions or leave the captions empty, set Captions to null. To pass through\n existing captions unchanged, set the MergePolicy to MergeRetain,\n and pass in a null CaptionSources array.

\n

For more information on embedded files, see the Subtitles Wikipedia page.

\n

For more information on sidecar files, see the Extensible Metadata Platform and Sidecar file\n Wikipedia pages.

\n " } }, "documentation": "\n

The CreateJobOutput structure.

\n " @@ -763,7 +922,7 @@ "shape_name": "Time", "type": "string", "pattern": "(^\\d{1,5}(\\.\\d{0,3})?$)|(^([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", - "documentation": "\n

The duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.

\n

If you specify a value longer than the duration of the input file , Elastic Transcoder transcodes \n the file and returns a warning message.

\n " + "documentation": "\n

The duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.

\n

If you specify a value longer than the duration of the input file, Elastic Transcoder transcodes \n the file and returns a warning message.

\n " } }, "documentation": "\n

Settings that determine when a clip begins and how long it lasts.

\n " @@ -771,7 +930,85 @@ }, "documentation": "\n

Settings for one clip in a composition. All jobs in a playlist must have the same clip settings.

\n " }, - "documentation": "\n

You can create an output file that contains an excerpt from the input file. This excerpt, called a clip, can come from the beginning, middle, or end of the file. The Composition object contains settings for the clips that make up an output file. For the current release, you can only specify settings for a single clip per output file. The Composition object cannot be null.

\n " + "documentation": "\n

You can create an output file that contains an excerpt from the input file. This \n excerpt, called a clip, can come from the beginning, middle, or end of the file. \n The Composition object contains settings for the clips that make up an output \n file. For the current release, you can only specify settings for a single clip \n per output file. The Composition object cannot be null.

\n " + }, + "Captions": { + "shape_name": "Captions", + "type": "structure", + "members": { + "MergePolicy": { + "shape_name": "CaptionMergePolicy", + "type": "string", + "pattern": "(^MergeOverride$)|(^MergeRetain$)|(^Override$)", + "documentation": "\n

A policy that determines how Elastic Transcoder handles the existence of multiple captions.

\n \n

MergePolicy cannot be null.

\n " + }, + "CaptionSources": { + "shape_name": "CaptionSources", + "type": "list", + "members": { + "shape_name": "CaptionSource", + "type": "structure", + "members": { + "Key": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\n

The name of the sidecar caption file that you want Elastic Transcoder to include in the\n output file.

\n " + }, + "Language": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\n

A string that specifies the language of the caption. Specify this as one of:

\n \n

For more information on ISO language codes and language names, see the List of ISO \n 639-1 codes.

\n " + }, + "TimeOffset": { + "shape_name": "TimeOffset", + "type": "string", + "pattern": "(^[+-]?\\d{1,5}(\\.\\d{0,3})?$)|(^[+-]?([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", + "documentation": "\n

For clip generation or captions that do not start at the same time\n as the associated video file, the TimeOffset tells Elastic Transcoder how much of the video to encode\n before including captions.

\n

Specify the TimeOffset in the form [+-]SS.sss or [+-]HH:mm:SS.ss.

\n " + }, + "Label": { + "shape_name": "Name", + "type": "string", + "min_length": 1, + "max_length": 40, + "documentation": "\n

The label of the caption shown in the player when choosing\n a language. We recommend that you put the caption language name here, \n in the language of the captions.

\n " + } + }, + "documentation": "\n

A source file for the input sidecar captions used during the transcoding\n process.

\n " + }, + "max_length": 20, + "documentation": "\n

Source files for the input sidecar captions used during the transcoding\n process. To omit all sidecar captions, leave CaptionSources blank.

\n " + }, + "CaptionFormats": { + "shape_name": "CaptionFormats", + "type": "list", + "members": { + "shape_name": "CaptionFormat", + "type": "structure", + "members": { + "Format": { + "shape_name": "CaptionFormatFormat", + "type": "string", + "pattern": "(^mov-text$)|(^cea-608$)|(^cea-708$)|(^srt$)|(^scc$)|(^webvtt$)|(^dfxp$)", + "documentation": "\n

The format you specify determines whether Elastic Transcoder generates an\n embedded or sidecar caption for this output.

\n \n " + }, + "Pattern": { + "shape_name": "CaptionFormatPattern", + "type": "string", + "pattern": "(^$)|(^.*\\{language\\}.*$)", + "documentation": "\n

The prefix for caption filenames, in the form description-{language}, where:

\n \n

If you don't include {language} in the file name pattern, Elastic Transcoder automatically\n appends \"{language}\" to the value that you specify for the description. In addition,\n Elastic Transcoder automatically appends the count to the end of the segment files.

\n

For example, suppose you're transcoding into srt format. When you enter\n \"Sydney-{language}-sunrise\", and the language of the captions is English\n (en), the name of the first caption file will be Sydney-en-sunrise00000.srt.

\n " + } + }, + "documentation": "\n

The file format of the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.

\n " + }, + "max_length": 4, + "documentation": "\n

The array of file formats for the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.

\n " + } + }, + "documentation": "\n

You can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another.\n All captions must be in UTF-8. Elastic Transcoder supports two types of captions:

\n \n

If you want ttml or smpte-tt compatible captions, specify dfxp as your output format.

\n

Elastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures\n as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does\n not preserve text formatting (for example, italics) during the transcoding process.

\n

To remove captions or leave the captions empty, set Captions to null. To pass through\n existing captions unchanged, set the MergePolicy to MergeRetain,\n and pass in a null CaptionSources array.

\n

For more information on embedded files, see the Subtitles Wikipedia page.

\n

For more information on sidecar files, see the Extensible Metadata Platform and Sidecar file\n Wikipedia pages.

\n " } }, "documentation": "\n

If you specified one output for a job, information about that output. If you specified\n multiple outputs for a job, the Output object lists information about the first output.\n This duplicates the information that is listed for the first output in the Outputs\n object.

\n

Outputs recommended instead. A section of the request or response\n body that provides information about the transcoded (target) file.

\n " @@ -958,7 +1195,7 @@ "shape_name": "Time", "type": "string", "pattern": "(^\\d{1,5}(\\.\\d{0,3})?$)|(^([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", - "documentation": "\n

The duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.

\n

If you specify a value longer than the duration of the input file , Elastic Transcoder transcodes \n the file and returns a warning message.

\n " + "documentation": "\n

The duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.

\n

If you specify a value longer than the duration of the input file, Elastic Transcoder transcodes \n the file and returns a warning message.

\n " } }, "documentation": "\n

Settings that determine when a clip begins and how long it lasts.

\n " @@ -966,7 +1203,85 @@ }, "documentation": "\n

Settings for one clip in a composition. All jobs in a playlist must have the same clip settings.

\n " }, - "documentation": "\n

You can create an output file that contains an excerpt from the input file. This excerpt, called a clip, can come from the beginning, middle, or end of the file. The Composition object contains settings for the clips that make up an output file. For the current release, you can only specify settings for a single clip per output file. The Composition object cannot be null.

\n " + "documentation": "\n

You can create an output file that contains an excerpt from the input file. This \n excerpt, called a clip, can come from the beginning, middle, or end of the file. \n The Composition object contains settings for the clips that make up an output \n file. For the current release, you can only specify settings for a single clip \n per output file. The Composition object cannot be null.

\n " + }, + "Captions": { + "shape_name": "Captions", + "type": "structure", + "members": { + "MergePolicy": { + "shape_name": "CaptionMergePolicy", + "type": "string", + "pattern": "(^MergeOverride$)|(^MergeRetain$)|(^Override$)", + "documentation": "\n

A policy that determines how Elastic Transcoder handles the existence of multiple captions.

\n \n

MergePolicy cannot be null.

\n " + }, + "CaptionSources": { + "shape_name": "CaptionSources", + "type": "list", + "members": { + "shape_name": "CaptionSource", + "type": "structure", + "members": { + "Key": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\n

The name of the sidecar caption file that you want Elastic Transcoder to include in the\n output file.

\n " + }, + "Language": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\n

A string that specifies the language of the caption. Specify this as one of:

\n \n

For more information on ISO language codes and language names, see the List of ISO \n 639-1 codes.

\n " + }, + "TimeOffset": { + "shape_name": "TimeOffset", + "type": "string", + "pattern": "(^[+-]?\\d{1,5}(\\.\\d{0,3})?$)|(^[+-]?([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", + "documentation": "\n

For clip generation or captions that do not start at the same time\n as the associated video file, the TimeOffset tells Elastic Transcoder how much of the video to encode\n before including captions.

\n

Specify the TimeOffset in the form [+-]SS.sss or [+-]HH:mm:SS.ss.

\n " + }, + "Label": { + "shape_name": "Name", + "type": "string", + "min_length": 1, + "max_length": 40, + "documentation": "\n

The label of the caption shown in the player when choosing\n a language. We recommend that you put the caption language name here, \n in the language of the captions.

\n " + } + }, + "documentation": "\n

A source file for the input sidecar captions used during the transcoding\n process.

\n " + }, + "max_length": 20, + "documentation": "\n

Source files for the input sidecar captions used during the transcoding\n process. To omit all sidecar captions, leave CaptionSources blank.

\n " + }, + "CaptionFormats": { + "shape_name": "CaptionFormats", + "type": "list", + "members": { + "shape_name": "CaptionFormat", + "type": "structure", + "members": { + "Format": { + "shape_name": "CaptionFormatFormat", + "type": "string", + "pattern": "(^mov-text$)|(^cea-608$)|(^cea-708$)|(^srt$)|(^scc$)|(^webvtt$)|(^dfxp$)", + "documentation": "\n

The format you specify determines whether Elastic Transcoder generates an\n embedded or sidecar caption for this output.

\n \n " + }, + "Pattern": { + "shape_name": "CaptionFormatPattern", + "type": "string", + "pattern": "(^$)|(^.*\\{language\\}.*$)", + "documentation": "\n

The prefix for caption filenames, in the form description-{language}, where:

\n \n

If you don't include {language} in the file name pattern, Elastic Transcoder automatically\n appends \"{language}\" to the value that you specify for the description. In addition,\n Elastic Transcoder automatically appends the count to the end of the segment files.

\n

For example, suppose you're transcoding into srt format. When you enter\n \"Sydney-{language}-sunrise\", and the language of the captions is English\n (en), the name of the first caption file will be Sydney-en-sunrise00000.srt.

\n " + } + }, + "documentation": "\n

The file format of the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.

\n " + }, + "max_length": 4, + "documentation": "\n

The array of file formats for the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.

\n " + } + }, + "documentation": "\n

You can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another.\n All captions must be in UTF-8. Elastic Transcoder supports two types of captions:

\n \n

If you want ttml or smpte-tt compatible captions, specify dfxp as your output format.

\n

Elastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures\n as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does\n not preserve text formatting (for example, italics) during the transcoding process.

\n

To remove captions or leave the captions empty, set Captions to null. To pass through\n existing captions unchanged, set the MergePolicy to MergeRetain,\n and pass in a null CaptionSources array.

\n

For more information on embedded files, see the Subtitles Wikipedia page.

\n

For more information on sidecar files, see the Extensible Metadata Platform and Sidecar file\n Wikipedia pages.

\n " } }, "documentation": "\n

Outputs recommended instead. If you specified one output for a job,\n information about that output. If you specified multiple outputs for a job, the\n Output object lists information about the first output. This duplicates\n the information that is listed for the first output in the Outputs\n object.

\n " @@ -1105,13 +1420,15 @@ "type": "string", "min_length": 1, "max_length": 40, - "documentation": "\n

The name of the pipeline. We recommend that the name be unique within the AWS account,\n but uniqueness is not enforced.

\n

Constraints: Maximum 40 characters.

\n " + "documentation": "\n

The name of the pipeline. We recommend that the name be unique within the AWS account,\n but uniqueness is not enforced.

\n

Constraints: Maximum 40 characters.

\n ", + "required": true }, "InputBucket": { "shape_name": "BucketName", "type": "string", "pattern": "^(\\w|\\.|-){1,255}$", - "documentation": "\n

The Amazon S3 bucket in which you saved the media files that you want to transcode.

\n " + "documentation": "\n

The Amazon S3 bucket in which you saved the media files that you want to transcode.

\n ", + "required": true }, "OutputBucket": { "shape_name": "BucketName", @@ -1123,7 +1440,8 @@ "shape_name": "Role", "type": "string", "pattern": "^arn:aws:iam::\\w{12}:role/.+$", - "documentation": "\n

The IAM Amazon Resource Name (ARN) for the role that you want Elastic Transcoder to use to create the\n pipeline.

\n " + "documentation": "\n

The IAM Amazon Resource Name (ARN) for the role that you want Elastic Transcoder to use to create the\n pipeline.

\n ", + "required": true }, "Notifications": { "shape_name": "Notifications", @@ -1536,7 +1854,8 @@ "type": "string", "min_length": 1, "max_length": 40, - "documentation": "\n

The name of the preset. We recommend that the name be unique within the AWS account, but\n uniqueness is not enforced.

\n " + "documentation": "\n

The name of the preset. We recommend that the name be unique within the AWS account, but\n uniqueness is not enforced.

\n ", + "required": true }, "Description": { "shape_name": "Description", @@ -1549,7 +1868,8 @@ "shape_name": "PresetContainer", "type": "string", "pattern": "(^mp4$)|(^ts$)|(^webm$)|(^mp3$)|(^ogg$)", - "documentation": "\n

The container type for the output file. Valid values include mp3, \n mp4, ogg, ts, and webm.

\n " + "documentation": "\n

The container type for the output file. Valid values include mp3, \n mp4, ogg, ts, and webm.

\n ", + "required": true }, "Video": { "shape_name": "VideoParameters", @@ -1670,13 +1990,13 @@ "MaxWidth": { "shape_name": "PixelsOrPercent", "type": "string", - "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{2,4}?px$)", + "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{1,4}?px$)", "documentation": "\n

The maximum width of the watermark in one of the following formats:

\n " }, "MaxHeight": { "shape_name": "PixelsOrPercent", "type": "string", - "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{2,4}?px$)", + "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{1,4}?px$)", "documentation": "\n

The maximum height of the watermark in one of the following formats:

If you specify the value in pixels, it must be less than or equal to the value of\n MaxHeight.

\n " }, "SizingPolicy": { @@ -1694,7 +2014,7 @@ "HorizontalOffset": { "shape_name": "PixelsOrPercent", "type": "string", - "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{2,4}?px$)", + "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{1,4}?px$)", "documentation": "\n

The amount by which you want the horizontal position of the watermark to be offset from\n the position specified by HorizontalAlign:

For example, if you specify Left for HorizontalAlign and 5px for\n HorizontalOffset, the left side of the watermark appears 5 pixels from\n the left border of the output video.

\n

HorizontalOffset is only valid when the value of\n HorizontalAlign is Left or Right. If you\n specify an offset that causes the watermark to extend beyond the left or right border\n and Elastic Transcoder has not added black bars, the watermark is cropped. If Elastic\n Transcoder has added black bars, the watermark extends into the black bars. If the\n watermark extends beyond the black bars, it is cropped.

\n

Use the value of Target to specify whether you want to include the black\n bars that are added by Elastic Transcoder, if any, in the offset calculation.

\n " }, "VerticalAlign": { @@ -1706,7 +2026,7 @@ "VerticalOffset": { "shape_name": "PixelsOrPercent", "type": "string", - "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{2,4}?px$)", + "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{1,4}?px$)", "documentation": "\n VerticalOffset\n

The amount by which you want the vertical position of the watermark to be offset from the\n position specified by VerticalAlign:

For example, if you specify Top for VerticalAlign and\n 5px for VerticalOffset, the top of the watermark appears 5\n pixels from the top border of the output video.

\n

VerticalOffset is only valid when the value of VerticalAlign is Top or\n Bottom.

\n

If you specify an offset that causes the watermark to extend beyond the top or bottom\n border and Elastic Transcoder has not added black bars, the watermark is cropped. If\n Elastic Transcoder has added black bars, the watermark extends into the black bars. If\n the watermark extends beyond the black bars, it is cropped.

\n\n

Use the value of Target to specify whether you want Elastic Transcoder to\n include the black bars that are added by Elastic Transcoder, if any, in the offset\n calculation.

\n " }, "Opacity": { @@ -1765,7 +2085,7 @@ "shape_name": "AudioCodecProfile", "type": "string", "pattern": "(^auto$)|(^AAC-LC$)|(^HE-AAC$)|(^HE-AACv2$)", - "documentation": "\n

If you specified AAC for Audio:Codec, choose the AAC profile for the output file.\n Elastic Transcoder supports the following profiles:

\n \n

If you created any presets before AAC profiles were added, Elastic Transcoder automatically updated\n your presets to use AAC-LC. You can change the value as required.

\n " + "documentation": "\n

You can only choose an audio profile when you specify AAC for the value of Audio:Codec.

\n

Specify the AAC profile for the output file. Elastic Transcoder supports the following profiles:

\n \n

If you created any presets before AAC profiles were added, Elastic Transcoder automatically updated\n your presets to use AAC-LC. You can change the value as required.

\n " } }, "documentation": "\n

If you specified AAC for Audio:Codec, this is the AAC \n compression profile to use. Valid values include:

\n

auto, AAC-LC, HE-AAC, HE-AACv2

\n

If you specify auto, Elastic Transcoder chooses a profile based on the bit rate of the output file.

\n " @@ -1906,7 +2226,7 @@ "shape_name": "AudioCodecProfile", "type": "string", "pattern": "(^auto$)|(^AAC-LC$)|(^HE-AAC$)|(^HE-AACv2$)", - "documentation": "\n

If you specified AAC for Audio:Codec, choose the AAC profile for the output file.\n Elastic Transcoder supports the following profiles:

\n \n

If you created any presets before AAC profiles were added, Elastic Transcoder automatically updated\n your presets to use AAC-LC. You can change the value as required.

\n " + "documentation": "\n

You can only choose an audio profile when you specify AAC for the value of Audio:Codec.

\n

Specify the AAC profile for the output file. Elastic Transcoder supports the following profiles:

\n \n

If you created any presets before AAC profiles were added, Elastic Transcoder automatically updated\n your presets to use AAC-LC. You can change the value as required.

\n " } }, "documentation": "\n

If you specified AAC for Audio:Codec, this is the AAC \n compression profile to use. Valid values include:

\n

auto, AAC-LC, HE-AAC, HE-AACv2

\n

If you specify auto, Elastic Transcoder chooses a profile based on the bit rate of the output file.

\n " @@ -2033,13 +2353,13 @@ "MaxWidth": { "shape_name": "PixelsOrPercent", "type": "string", - "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{2,4}?px$)", + "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{1,4}?px$)", "documentation": "\n

The maximum width of the watermark in one of the following formats:

\n " }, "MaxHeight": { "shape_name": "PixelsOrPercent", "type": "string", - "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{2,4}?px$)", + "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{1,4}?px$)", "documentation": "\n

The maximum height of the watermark in one of the following formats:

If you specify the value in pixels, it must be less than or equal to the value of\n MaxHeight.

\n " }, "SizingPolicy": { @@ -2057,7 +2377,7 @@ "HorizontalOffset": { "shape_name": "PixelsOrPercent", "type": "string", - "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{2,4}?px$)", + "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{1,4}?px$)", "documentation": "\n

The amount by which you want the horizontal position of the watermark to be offset from\n the position specified by HorizontalAlign:

For example, if you specify Left for HorizontalAlign and 5px for\n HorizontalOffset, the left side of the watermark appears 5 pixels from\n the left border of the output video.

\n

HorizontalOffset is only valid when the value of\n HorizontalAlign is Left or Right. If you\n specify an offset that causes the watermark to extend beyond the left or right border\n and Elastic Transcoder has not added black bars, the watermark is cropped. If Elastic\n Transcoder has added black bars, the watermark extends into the black bars. If the\n watermark extends beyond the black bars, it is cropped.

\n

Use the value of Target to specify whether you want to include the black\n bars that are added by Elastic Transcoder, if any, in the offset calculation.

\n " }, "VerticalAlign": { @@ -2069,7 +2389,7 @@ "VerticalOffset": { "shape_name": "PixelsOrPercent", "type": "string", - "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{2,4}?px$)", + "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{1,4}?px$)", "documentation": "\n VerticalOffset\n

The amount by which you want the vertical position of the watermark to be offset from the\n position specified by VerticalAlign:

For example, if you specify Top for VerticalAlign and\n 5px for VerticalOffset, the top of the watermark appears 5\n pixels from the top border of the output video.

\n

VerticalOffset is only valid when the value of VerticalAlign is Top or\n Bottom.

\n

If you specify an offset that causes the watermark to extend beyond the top or bottom\n border and Elastic Transcoder has not added black bars, the watermark is cropped. If\n Elastic Transcoder has added black bars, the watermark extends into the black bars. If\n the watermark extends beyond the black bars, it is cropped.

\n\n

Use the value of Target to specify whether you want Elastic Transcoder to\n include the black bars that are added by Elastic Transcoder, if any, in the offset\n calculation.

\n " }, "Opacity": { @@ -2219,6 +2539,7 @@ "type": "string", "pattern": "^\\d{13}-\\w{6}$", "documentation": "\n

The identifier of the pipeline that you want to delete.

\n ", + "required": true, "location": "uri" } }, @@ -2293,6 +2614,7 @@ "type": "string", "pattern": "^\\d{13}-\\w{6}$", "documentation": "\n

The identifier of the preset for which you want to get detailed information.

\n ", + "required": true, "location": "uri" } }, @@ -2359,6 +2681,7 @@ "type": "string", "pattern": "^\\d{13}-\\w{6}$", "documentation": "\n

The ID of the pipeline for which you want to get job information.

\n ", + "required": true, "location": "uri" }, "Ascending": { @@ -2629,7 +2952,7 @@ "shape_name": "Time", "type": "string", "pattern": "(^\\d{1,5}(\\.\\d{0,3})?$)|(^([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", - "documentation": "\n

The duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.

\n

If you specify a value longer than the duration of the input file , Elastic Transcoder transcodes \n the file and returns a warning message.

\n " + "documentation": "\n

The duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.

\n

If you specify a value longer than the duration of the input file, Elastic Transcoder transcodes \n the file and returns a warning message.

\n " } }, "documentation": "\n

Settings that determine when a clip begins and how long it lasts.

\n " @@ -2637,7 +2960,85 @@ }, "documentation": "\n

Settings for one clip in a composition. All jobs in a playlist must have the same clip settings.

\n " }, - "documentation": "\n

You can create an output file that contains an excerpt from the input file. This excerpt, called a clip, can come from the beginning, middle, or end of the file. The Composition object contains settings for the clips that make up an output file. For the current release, you can only specify settings for a single clip per output file. The Composition object cannot be null.

\n " + "documentation": "\n

You can create an output file that contains an excerpt from the input file. This \n excerpt, called a clip, can come from the beginning, middle, or end of the file. \n The Composition object contains settings for the clips that make up an output \n file. For the current release, you can only specify settings for a single clip \n per output file. The Composition object cannot be null.

\n " + }, + "Captions": { + "shape_name": "Captions", + "type": "structure", + "members": { + "MergePolicy": { + "shape_name": "CaptionMergePolicy", + "type": "string", + "pattern": "(^MergeOverride$)|(^MergeRetain$)|(^Override$)", + "documentation": "\n

A policy that determines how Elastic Transcoder handles the existence of multiple captions.

\n \n

MergePolicy cannot be null.

\n " + }, + "CaptionSources": { + "shape_name": "CaptionSources", + "type": "list", + "members": { + "shape_name": "CaptionSource", + "type": "structure", + "members": { + "Key": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\n

The name of the sidecar caption file that you want Elastic Transcoder to include in the\n output file.

\n " + }, + "Language": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\n

A string that specifies the language of the caption. Specify this as one of:

\n \n

For more information on ISO language codes and language names, see the List of ISO \n 639-1 codes.

\n " + }, + "TimeOffset": { + "shape_name": "TimeOffset", + "type": "string", + "pattern": "(^[+-]?\\d{1,5}(\\.\\d{0,3})?$)|(^[+-]?([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", + "documentation": "\n

For clip generation or captions that do not start at the same time\n as the associated video file, the TimeOffset tells Elastic Transcoder how much of the video to encode\n before including captions.

\n

Specify the TimeOffset in the form [+-]SS.sss or [+-]HH:mm:SS.ss.

\n " + }, + "Label": { + "shape_name": "Name", + "type": "string", + "min_length": 1, + "max_length": 40, + "documentation": "\n

The label of the caption shown in the player when choosing\n a language. We recommend that you put the caption language name here, \n in the language of the captions.

\n " + } + }, + "documentation": "\n

A source file for the input sidecar captions used during the transcoding\n process.

\n " + }, + "max_length": 20, + "documentation": "\n

Source files for the input sidecar captions used during the transcoding\n process. To omit all sidecar captions, leave CaptionSources blank.

\n " + }, + "CaptionFormats": { + "shape_name": "CaptionFormats", + "type": "list", + "members": { + "shape_name": "CaptionFormat", + "type": "structure", + "members": { + "Format": { + "shape_name": "CaptionFormatFormat", + "type": "string", + "pattern": "(^mov-text$)|(^cea-608$)|(^cea-708$)|(^srt$)|(^scc$)|(^webvtt$)|(^dfxp$)", + "documentation": "\n

The format you specify determines whether Elastic Transcoder generates an\n embedded or sidecar caption for this output.

\n \n " + }, + "Pattern": { + "shape_name": "CaptionFormatPattern", + "type": "string", + "pattern": "(^$)|(^.*\\{language\\}.*$)", + "documentation": "\n

The prefix for caption filenames, in the form description-{language}, where:

\n \n

If you don't include {language} in the file name pattern, Elastic Transcoder automatically\n appends \"{language}\" to the value that you specify for the description. In addition,\n Elastic Transcoder automatically appends the count to the end of the segment files.

\n

For example, suppose you're transcoding into srt format. When you enter\n \"Sydney-{language}-sunrise\", and the language of the captions is English\n (en), the name of the first caption file will be Sydney-en-sunrise00000.srt.

\n " + } + }, + "documentation": "\n

The file format of the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.

\n " + }, + "max_length": 4, + "documentation": "\n

The array of file formats for the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.

\n " + } + }, + "documentation": "\n

You can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another.\n All captions must be in UTF-8. Elastic Transcoder supports two types of captions:

\n \n

If you want ttml or smpte-tt compatible captions, specify dfxp as your output format.

\n

Elastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures\n as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does\n not preserve text formatting (for example, italics) during the transcoding process.

\n

To remove captions or leave the captions empty, set Captions to null. To pass through\n existing captions unchanged, set the MergePolicy to MergeRetain,\n and pass in a null CaptionSources array.

\n

For more information on embedded files, see the Subtitles Wikipedia page.

\n

For more information on sidecar files, see the Extensible Metadata Platform and Sidecar file\n Wikipedia pages.

\n " } }, "documentation": "\n

If you specified one output for a job, information about that output. If you specified\n multiple outputs for a job, the Output object lists information about the first output.\n This duplicates the information that is listed for the first output in the Outputs\n object.

\n

Outputs recommended instead. A section of the request or response\n body that provides information about the transcoded (target) file.

\n " @@ -2824,7 +3225,7 @@ "shape_name": "Time", "type": "string", "pattern": "(^\\d{1,5}(\\.\\d{0,3})?$)|(^([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", - "documentation": "\n

The duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.

\n

If you specify a value longer than the duration of the input file , Elastic Transcoder transcodes \n the file and returns a warning message.

\n " + "documentation": "\n

The duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.

\n

If you specify a value longer than the duration of the input file, Elastic Transcoder transcodes \n the file and returns a warning message.

\n " } }, "documentation": "\n

Settings that determine when a clip begins and how long it lasts.

\n " @@ -2832,7 +3233,85 @@ }, "documentation": "\n

Settings for one clip in a composition. All jobs in a playlist must have the same clip settings.

\n " }, - "documentation": "\n

You can create an output file that contains an excerpt from the input file. This excerpt, called a clip, can come from the beginning, middle, or end of the file. The Composition object contains settings for the clips that make up an output file. For the current release, you can only specify settings for a single clip per output file. The Composition object cannot be null.

\n " + "documentation": "\n

You can create an output file that contains an excerpt from the input file. This \n excerpt, called a clip, can come from the beginning, middle, or end of the file. \n The Composition object contains settings for the clips that make up an output \n file. For the current release, you can only specify settings for a single clip \n per output file. The Composition object cannot be null.

\n " + }, + "Captions": { + "shape_name": "Captions", + "type": "structure", + "members": { + "MergePolicy": { + "shape_name": "CaptionMergePolicy", + "type": "string", + "pattern": "(^MergeOverride$)|(^MergeRetain$)|(^Override$)", + "documentation": "\n

A policy that determines how Elastic Transcoder handles the existence of multiple captions.

\n \n

MergePolicy cannot be null.

\n " + }, + "CaptionSources": { + "shape_name": "CaptionSources", + "type": "list", + "members": { + "shape_name": "CaptionSource", + "type": "structure", + "members": { + "Key": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\n

The name of the sidecar caption file that you want Elastic Transcoder to include in the\n output file.

\n " + }, + "Language": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\n

A string that specifies the language of the caption. Specify this as one of:

\n \n

For more information on ISO language codes and language names, see the List of ISO \n 639-1 codes.

\n " + }, + "TimeOffset": { + "shape_name": "TimeOffset", + "type": "string", + "pattern": "(^[+-]?\\d{1,5}(\\.\\d{0,3})?$)|(^[+-]?([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", + "documentation": "\n

For clip generation or captions that do not start at the same time\n as the associated video file, the TimeOffset tells Elastic Transcoder how much of the video to encode\n before including captions.

\n

Specify the TimeOffset in the form [+-]SS.sss or [+-]HH:mm:SS.ss.

\n " + }, + "Label": { + "shape_name": "Name", + "type": "string", + "min_length": 1, + "max_length": 40, + "documentation": "\n

The label of the caption shown in the player when choosing\n a language. We recommend that you put the caption language name here, \n in the language of the captions.

\n " + } + }, + "documentation": "\n

A source file for the input sidecar captions used during the transcoding\n process.

\n " + }, + "max_length": 20, + "documentation": "\n

Source files for the input sidecar captions used during the transcoding\n process. To omit all sidecar captions, leave CaptionSources blank.

\n " + }, + "CaptionFormats": { + "shape_name": "CaptionFormats", + "type": "list", + "members": { + "shape_name": "CaptionFormat", + "type": "structure", + "members": { + "Format": { + "shape_name": "CaptionFormatFormat", + "type": "string", + "pattern": "(^mov-text$)|(^cea-608$)|(^cea-708$)|(^srt$)|(^scc$)|(^webvtt$)|(^dfxp$)", + "documentation": "\n

The format you specify determines whether Elastic Transcoder generates an\n embedded or sidecar caption for this output.

\n \n " + }, + "Pattern": { + "shape_name": "CaptionFormatPattern", + "type": "string", + "pattern": "(^$)|(^.*\\{language\\}.*$)", + "documentation": "\n

The prefix for caption filenames, in the form description-{language}, where:

\n \n

If you don't include {language} in the file name pattern, Elastic Transcoder automatically\n appends \"{language}\" to the value that you specify for the description. In addition,\n Elastic Transcoder automatically appends the count to the end of the segment files.

\n

For example, suppose you're transcoding into srt format. When you enter\n \"Sydney-{language}-sunrise\", and the language of the captions is English\n (en), the name of the first caption file will be Sydney-en-sunrise00000.srt.

\n " + } + }, + "documentation": "\n

The file format of the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.

\n " + }, + "max_length": 4, + "documentation": "\n

The array of file formats for the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.

\n " + } + }, + "documentation": "\n

You can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another.\n All captions must be in UTF-8. Elastic Transcoder supports two types of captions:

\n \n

If you want ttml or smpte-tt compatible captions, specify dfxp as your output format.

\n

Elastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures\n as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does\n not preserve text formatting (for example, italics) during the transcoding process.

\n

To remove captions or leave the captions empty, set Captions to null. To pass through\n existing captions unchanged, set the MergePolicy to MergeRetain,\n and pass in a null CaptionSources array.

\n

For more information on embedded files, see the Subtitles Wikipedia page.

\n

For more information on sidecar files, see the Extensible Metadata Platform and Sidecar file\n Wikipedia pages.

\n " } }, "documentation": "\n

Outputs recommended instead.If you specified one output for a job,\n information about that output. If you specified multiple outputs for a job, the\n Output object lists information about the first output. This duplicates\n the information that is listed for the first output in the Outputs\n object.

\n " @@ -2971,6 +3450,7 @@ "type": "string", "pattern": "(^Submitted$)|(^Progressing$)|(^Complete$)|(^Canceled$)|(^Error$)", "documentation": "\n

To get information about all of the jobs associated with the current AWS account that\n have a given status, specify the following status: Submitted,\n Progressing, Complete, Canceled, or\n Error.

\n ", + "required": true, "location": "uri" }, "Ascending": { @@ -3241,7 +3721,7 @@ "shape_name": "Time", "type": "string", "pattern": "(^\\d{1,5}(\\.\\d{0,3})?$)|(^([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", - "documentation": "\n

The duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.

\n

If you specify a value longer than the duration of the input file , Elastic Transcoder transcodes \n the file and returns a warning message.

\n " + "documentation": "\n

The duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.

\n

If you specify a value longer than the duration of the input file, Elastic Transcoder transcodes \n the file and returns a warning message.

\n " } }, "documentation": "\n

Settings that determine when a clip begins and how long it lasts.

\n " @@ -3249,7 +3729,85 @@ }, "documentation": "\n

Settings for one clip in a composition. All jobs in a playlist must have the same clip settings.

\n " }, - "documentation": "\n

You can create an output file that contains an excerpt from the input file. This excerpt, called a clip, can come from the beginning, middle, or end of the file. The Composition object contains settings for the clips that make up an output file. For the current release, you can only specify settings for a single clip per output file. The Composition object cannot be null.

\n " + "documentation": "\n

You can create an output file that contains an excerpt from the input file. This \n excerpt, called a clip, can come from the beginning, middle, or end of the file. \n The Composition object contains settings for the clips that make up an output \n file. For the current release, you can only specify settings for a single clip \n per output file. The Composition object cannot be null.

\n " + }, + "Captions": { + "shape_name": "Captions", + "type": "structure", + "members": { + "MergePolicy": { + "shape_name": "CaptionMergePolicy", + "type": "string", + "pattern": "(^MergeOverride$)|(^MergeRetain$)|(^Override$)", + "documentation": "\n

A policy that determines how Elastic Transcoder handles the existence of multiple captions.

\n \n

MergePolicy cannot be null.

\n " + }, + "CaptionSources": { + "shape_name": "CaptionSources", + "type": "list", + "members": { + "shape_name": "CaptionSource", + "type": "structure", + "members": { + "Key": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\n

The name of the sidecar caption file that you want Elastic Transcoder to include in the\n output file.

\n " + }, + "Language": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\n

A string that specifies the language of the caption. Specify this as one of:

\n \n

For more information on ISO language codes and language names, see the List of ISO \n 639-1 codes.

\n " + }, + "TimeOffset": { + "shape_name": "TimeOffset", + "type": "string", + "pattern": "(^[+-]?\\d{1,5}(\\.\\d{0,3})?$)|(^[+-]?([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", + "documentation": "\n

For clip generation or captions that do not start at the same time\n as the associated video file, the TimeOffset tells Elastic Transcoder how much of the video to encode\n before including captions.

\n

Specify the TimeOffset in the form [+-]SS.sss or [+-]HH:mm:SS.ss.

\n " + }, + "Label": { + "shape_name": "Name", + "type": "string", + "min_length": 1, + "max_length": 40, + "documentation": "\n

The label of the caption shown in the player when choosing\n a language. We recommend that you put the caption language name here, \n in the language of the captions.

\n " + } + }, + "documentation": "\n

A source file for the input sidecar captions used during the transcoding\n process.

\n " + }, + "max_length": 20, + "documentation": "\n

Source files for the input sidecar captions used during the transcoding\n process. To omit all sidecar captions, leave CaptionSources blank.

\n " + }, + "CaptionFormats": { + "shape_name": "CaptionFormats", + "type": "list", + "members": { + "shape_name": "CaptionFormat", + "type": "structure", + "members": { + "Format": { + "shape_name": "CaptionFormatFormat", + "type": "string", + "pattern": "(^mov-text$)|(^cea-608$)|(^cea-708$)|(^srt$)|(^scc$)|(^webvtt$)|(^dfxp$)", + "documentation": "\n

The format you specify determines whether Elastic Transcoder generates an\n embedded or sidecar caption for this output.

\n \n " + }, + "Pattern": { + "shape_name": "CaptionFormatPattern", + "type": "string", + "pattern": "(^$)|(^.*\\{language\\}.*$)", + "documentation": "\n

The prefix for caption filenames, in the form description-{language}, where:

\n \n

If you don't include {language} in the file name pattern, Elastic Transcoder automatically\n appends \"{language}\" to the value that you specify for the description. In addition,\n Elastic Transcoder automatically appends the count to the end of the segment files.

\n

For example, suppose you're transcoding into srt format. When you enter\n \"Sydney-{language}-sunrise\", and the language of the captions is English\n (en), the name of the first caption file will be Sydney-en-sunrise00000.srt.

\n " + } + }, + "documentation": "\n

The file format of the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.

\n " + }, + "max_length": 4, + "documentation": "\n

The array of file formats for the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.

\n " + } + }, + "documentation": "\n

You can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another.\n All captions must be in UTF-8. Elastic Transcoder supports two types of captions:

\n \n

If you want ttml or smpte-tt compatible captions, specify dfxp as your output format.

\n

Elastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures\n as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does\n not preserve text formatting (for example, italics) during the transcoding process.

\n

To remove captions or leave the captions empty, set Captions to null. To pass through\n existing captions unchanged, set the MergePolicy to MergeRetain,\n and pass in a null CaptionSources array.

\n

For more information on embedded files, see the Subtitles Wikipedia page.

\n

For more information on sidecar files, see the Extensible Metadata Platform and Sidecar file\n Wikipedia pages.

\n " } }, "documentation": "\n

If you specified one output for a job, information about that output. If you specified\n multiple outputs for a job, the Output object lists information about the first output.\n This duplicates the information that is listed for the first output in the Outputs\n object.

\n

Outputs recommended instead. A section of the request or response\n body that provides information about the transcoded (target) file.

\n " @@ -3436,7 +3994,7 @@ "shape_name": "Time", "type": "string", "pattern": "(^\\d{1,5}(\\.\\d{0,3})?$)|(^([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", - "documentation": "\n

The duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.

\n

If you specify a value longer than the duration of the input file , Elastic Transcoder transcodes \n the file and returns a warning message.

\n " + "documentation": "\n

The duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.

\n

If you specify a value longer than the duration of the input file, Elastic Transcoder transcodes \n the file and returns a warning message.

\n " } }, "documentation": "\n

Settings that determine when a clip begins and how long it lasts.

\n " @@ -3444,7 +4002,85 @@ }, "documentation": "\n

Settings for one clip in a composition. All jobs in a playlist must have the same clip settings.

\n " }, - "documentation": "\n

You can create an output file that contains an excerpt from the input file. This excerpt, called a clip, can come from the beginning, middle, or end of the file. The Composition object contains settings for the clips that make up an output file. For the current release, you can only specify settings for a single clip per output file. The Composition object cannot be null.

\n " + "documentation": "\n

You can create an output file that contains an excerpt from the input file. This \n excerpt, called a clip, can come from the beginning, middle, or end of the file. \n The Composition object contains settings for the clips that make up an output \n file. For the current release, you can only specify settings for a single clip \n per output file. The Composition object cannot be null.

\n " + }, + "Captions": { + "shape_name": "Captions", + "type": "structure", + "members": { + "MergePolicy": { + "shape_name": "CaptionMergePolicy", + "type": "string", + "pattern": "(^MergeOverride$)|(^MergeRetain$)|(^Override$)", + "documentation": "\n

A policy that determines how Elastic Transcoder handles the existence of multiple captions.

\n \n

MergePolicy cannot be null.

\n " + }, + "CaptionSources": { + "shape_name": "CaptionSources", + "type": "list", + "members": { + "shape_name": "CaptionSource", + "type": "structure", + "members": { + "Key": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\n

The name of the sidecar caption file that you want Elastic Transcoder to include in the\n output file.

\n " + }, + "Language": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\n

A string that specifies the language of the caption. Specify this as one of:

\n \n

For more information on ISO language codes and language names, see the List of ISO \n 639-1 codes.

\n " + }, + "TimeOffset": { + "shape_name": "TimeOffset", + "type": "string", + "pattern": "(^[+-]?\\d{1,5}(\\.\\d{0,3})?$)|(^[+-]?([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", + "documentation": "\n

For clip generation or captions that do not start at the same time\n as the associated video file, the TimeOffset tells Elastic Transcoder how much of the video to encode\n before including captions.

\n

Specify the TimeOffset in the form [+-]SS.sss or [+-]HH:mm:SS.ss.

\n " + }, + "Label": { + "shape_name": "Name", + "type": "string", + "min_length": 1, + "max_length": 40, + "documentation": "\n

The label of the caption shown in the player when choosing\n a language. We recommend that you put the caption language name here, \n in the language of the captions.

\n " + } + }, + "documentation": "\n

A source file for the input sidecar captions used during the transcoding\n process.

\n " + }, + "max_length": 20, + "documentation": "\n

Source files for the input sidecar captions used during the transcoding\n process. To omit all sidecar captions, leave CaptionSources blank.

\n " + }, + "CaptionFormats": { + "shape_name": "CaptionFormats", + "type": "list", + "members": { + "shape_name": "CaptionFormat", + "type": "structure", + "members": { + "Format": { + "shape_name": "CaptionFormatFormat", + "type": "string", + "pattern": "(^mov-text$)|(^cea-608$)|(^cea-708$)|(^srt$)|(^scc$)|(^webvtt$)|(^dfxp$)", + "documentation": "\n

The format you specify determines whether Elastic Transcoder generates an\n embedded or sidecar caption for this output.

\n \n " + }, + "Pattern": { + "shape_name": "CaptionFormatPattern", + "type": "string", + "pattern": "(^$)|(^.*\\{language\\}.*$)", + "documentation": "\n

The prefix for caption filenames, in the form description-{language}, where:

\n \n

If you don't include {language} in the file name pattern, Elastic Transcoder automatically\n appends \"{language}\" to the value that you specify for the description. In addition,\n Elastic Transcoder automatically appends the count to the end of the segment files.

\n

For example, suppose you're transcoding into srt format. When you enter\n \"Sydney-{language}-sunrise\", and the language of the captions is English\n (en), the name of the first caption file will be Sydney-en-sunrise00000.srt.

\n " + } + }, + "documentation": "\n

The file format of the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.

\n " + }, + "max_length": 4, + "documentation": "\n

The array of file formats for the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.

\n " + } + }, + "documentation": "\n

You can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another.\n All captions must be in UTF-8. Elastic Transcoder supports two types of captions:

\n \n

If you want ttml or smpte-tt compatible captions, specify dfxp as your output format.

\n

Elastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures\n as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does\n not preserve text formatting (for example, italics) during the transcoding process.

\n

To remove captions or leave the captions empty, set Captions to null. To pass through\n existing captions unchanged, set the MergePolicy to MergeRetain,\n and pass in a null CaptionSources array.

\n

For more information on embedded files, see the Subtitles Wikipedia page.

\n

For more information on sidecar files, see the Extensible Metadata Platform and Sidecar file\n Wikipedia pages.

\n " } }, "documentation": "\n

Outputs recommended instead.If you specified one output for a job,\n information about that output. If you specified multiple outputs for a job, the\n Output object lists information about the first output. This duplicates\n the information that is listed for the first output in the Outputs\n object.

\n " @@ -3944,7 +4580,7 @@ "shape_name": "AudioCodecProfile", "type": "string", "pattern": "(^auto$)|(^AAC-LC$)|(^HE-AAC$)|(^HE-AACv2$)", - "documentation": "\n

If you specified AAC for Audio:Codec, choose the AAC profile for the output file.\n Elastic Transcoder supports the following profiles:

\n \n

If you created any presets before AAC profiles were added, Elastic Transcoder automatically updated\n your presets to use AAC-LC. You can change the value as required.

\n " + "documentation": "\n

You can only choose an audio profile when you specify AAC for the value of Audio:Codec.

\n

Specify the AAC profile for the output file. Elastic Transcoder supports the following profiles:

\n \n

If you created any presets before AAC profiles were added, Elastic Transcoder automatically updated\n your presets to use AAC-LC. You can change the value as required.

\n " } }, "documentation": "\n

If you specified AAC for Audio:Codec, this is the AAC \n compression profile to use. Valid values include:

\n

auto, AAC-LC, HE-AAC, HE-AACv2

\n

If you specify auto, Elastic Transcoder chooses a profile based on the bit rate of the output file.

\n " @@ -4071,13 +4707,13 @@ "MaxWidth": { "shape_name": "PixelsOrPercent", "type": "string", - "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{2,4}?px$)", + "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{1,4}?px$)", "documentation": "\n

The maximum width of the watermark in one of the following formats:

\n " }, "MaxHeight": { "shape_name": "PixelsOrPercent", "type": "string", - "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{2,4}?px$)", + "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{1,4}?px$)", "documentation": "\n

The maximum height of the watermark in one of the following formats:

If you specify the value in pixels, it must be less than or equal to the value of\n MaxHeight.

\n " }, "SizingPolicy": { @@ -4095,7 +4731,7 @@ "HorizontalOffset": { "shape_name": "PixelsOrPercent", "type": "string", - "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{2,4}?px$)", + "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{1,4}?px$)", "documentation": "\n

The amount by which you want the horizontal position of the watermark to be offset from\n the position specified by HorizontalAlign:

For example, if you specify Left for HorizontalAlign and 5px for\n HorizontalOffset, the left side of the watermark appears 5 pixels from\n the left border of the output video.

\n

HorizontalOffset is only valid when the value of\n HorizontalAlign is Left or Right. If you\n specify an offset that causes the watermark to extend beyond the left or right border\n and Elastic Transcoder has not added black bars, the watermark is cropped. If Elastic\n Transcoder has added black bars, the watermark extends into the black bars. If the\n watermark extends beyond the black bars, it is cropped.

\n

Use the value of Target to specify whether you want to include the black\n bars that are added by Elastic Transcoder, if any, in the offset calculation.

\n " }, "VerticalAlign": { @@ -4107,7 +4743,7 @@ "VerticalOffset": { "shape_name": "PixelsOrPercent", "type": "string", - "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{2,4}?px$)", + "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{1,4}?px$)", "documentation": "\n VerticalOffset\n

The amount by which you want the vertical position of the watermark to be offset from the\n position specified by VerticalAlign:

For example, if you specify Top for VerticalAlign and\n 5px for VerticalOffset, the top of the watermark appears 5\n pixels from the top border of the output video.

\n

VerticalOffset is only valid when the value of VerticalAlign is Top or\n Bottom.

\n

If you specify an offset that causes the watermark to extend beyond the top or bottom\n border and Elastic Transcoder has not added black bars, the watermark is cropped. If\n Elastic Transcoder has added black bars, the watermark extends into the black bars. If\n the watermark extends beyond the black bars, it is cropped.

\n\n

Use the value of Target to specify whether you want Elastic Transcoder to\n include the black bars that are added by Elastic Transcoder, if any, in the offset\n calculation.

\n " }, "Opacity": { @@ -4252,6 +4888,7 @@ "type": "string", "pattern": "^\\d{13}-\\w{6}$", "documentation": "\n

The identifier of the job for which you want to get detailed information.

\n ", + "required": true, "location": "uri" } }, @@ -4505,7 +5142,7 @@ "shape_name": "Time", "type": "string", "pattern": "(^\\d{1,5}(\\.\\d{0,3})?$)|(^([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", - "documentation": "\n

The duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.

\n

If you specify a value longer than the duration of the input file , Elastic Transcoder transcodes \n the file and returns a warning message.

\n " + "documentation": "\n

The duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.

\n

If you specify a value longer than the duration of the input file, Elastic Transcoder transcodes \n the file and returns a warning message.

\n " } }, "documentation": "\n

Settings that determine when a clip begins and how long it lasts.

\n " @@ -4513,7 +5150,85 @@ }, "documentation": "\n

Settings for one clip in a composition. All jobs in a playlist must have the same clip settings.

\n " }, - "documentation": "\n

You can create an output file that contains an excerpt from the input file. This excerpt, called a clip, can come from the beginning, middle, or end of the file. The Composition object contains settings for the clips that make up an output file. For the current release, you can only specify settings for a single clip per output file. The Composition object cannot be null.

\n " + "documentation": "\n

You can create an output file that contains an excerpt from the input file. This \n excerpt, called a clip, can come from the beginning, middle, or end of the file. \n The Composition object contains settings for the clips that make up an output \n file. For the current release, you can only specify settings for a single clip \n per output file. The Composition object cannot be null.

\n " + }, + "Captions": { + "shape_name": "Captions", + "type": "structure", + "members": { + "MergePolicy": { + "shape_name": "CaptionMergePolicy", + "type": "string", + "pattern": "(^MergeOverride$)|(^MergeRetain$)|(^Override$)", + "documentation": "\n

A policy that determines how Elastic Transcoder handles the existence of multiple captions.

\n \n

MergePolicy cannot be null.

\n " + }, + "CaptionSources": { + "shape_name": "CaptionSources", + "type": "list", + "members": { + "shape_name": "CaptionSource", + "type": "structure", + "members": { + "Key": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\n

The name of the sidecar caption file that you want Elastic Transcoder to include in the\n output file.

\n " + }, + "Language": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\n

A string that specifies the language of the caption. Specify this as one of:

\n \n

For more information on ISO language codes and language names, see the List of ISO \n 639-1 codes.

\n " + }, + "TimeOffset": { + "shape_name": "TimeOffset", + "type": "string", + "pattern": "(^[+-]?\\d{1,5}(\\.\\d{0,3})?$)|(^[+-]?([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", + "documentation": "\n

For clip generation or captions that do not start at the same time\n as the associated video file, the TimeOffset tells Elastic Transcoder how much of the video to encode\n before including captions.

\n

Specify the TimeOffset in the form [+-]SS.sss or [+-]HH:mm:SS.ss.

\n " + }, + "Label": { + "shape_name": "Name", + "type": "string", + "min_length": 1, + "max_length": 40, + "documentation": "\n

The label of the caption shown in the player when choosing\n a language. We recommend that you put the caption language name here, \n in the language of the captions.

\n " + } + }, + "documentation": "\n

A source file for the input sidecar captions used during the transcoding\n process.

\n " + }, + "max_length": 20, + "documentation": "\n

Source files for the input sidecar captions used during the transcoding\n process. To omit all sidecar captions, leave CaptionSources blank.

\n " + }, + "CaptionFormats": { + "shape_name": "CaptionFormats", + "type": "list", + "members": { + "shape_name": "CaptionFormat", + "type": "structure", + "members": { + "Format": { + "shape_name": "CaptionFormatFormat", + "type": "string", + "pattern": "(^mov-text$)|(^cea-608$)|(^cea-708$)|(^srt$)|(^scc$)|(^webvtt$)|(^dfxp$)", + "documentation": "\n

The format you specify determines whether Elastic Transcoder generates an\n embedded or sidecar caption for this output.

\n \n " + }, + "Pattern": { + "shape_name": "CaptionFormatPattern", + "type": "string", + "pattern": "(^$)|(^.*\\{language\\}.*$)", + "documentation": "\n

The prefix for caption filenames, in the form description-{language}, where:

\n \n

If you don't include {language} in the file name pattern, Elastic Transcoder automatically\n appends \"{language}\" to the value that you specify for the description. In addition,\n Elastic Transcoder automatically appends the count to the end of the segment files.

\n

For example, suppose you're transcoding into srt format. When you enter\n \"Sydney-{language}-sunrise\", and the language of the captions is English\n (en), the name of the first caption file will be Sydney-en-sunrise00000.srt.

\n " + } + }, + "documentation": "\n

The file format of the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.

\n " + }, + "max_length": 4, + "documentation": "\n

The array of file formats for the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.

\n " + } + }, + "documentation": "\n

You can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another.\n All captions must be in UTF-8. Elastic Transcoder supports two types of captions:

\n \n

If you want ttml or smpte-tt compatible captions, specify dfxp as your output format.

\n

Elastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures\n as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does\n not preserve text formatting (for example, italics) during the transcoding process.

\n

To remove captions or leave the captions empty, set Captions to null. To pass through\n existing captions unchanged, set the MergePolicy to MergeRetain,\n and pass in a null CaptionSources array.

\n

For more information on embedded files, see the Subtitles Wikipedia page.

\n

For more information on sidecar files, see the Extensible Metadata Platform and Sidecar file\n Wikipedia pages.

\n " } }, "documentation": "\n

If you specified one output for a job, information about that output. If you specified\n multiple outputs for a job, the Output object lists information about the first output.\n This duplicates the information that is listed for the first output in the Outputs\n object.

\n

Outputs recommended instead. A section of the request or response\n body that provides information about the transcoded (target) file.

\n " @@ -4700,7 +5415,7 @@ "shape_name": "Time", "type": "string", "pattern": "(^\\d{1,5}(\\.\\d{0,3})?$)|(^([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", - "documentation": "\n

The duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.

\n

If you specify a value longer than the duration of the input file , Elastic Transcoder transcodes \n the file and returns a warning message.

\n " + "documentation": "\n

The duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS \n is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, \n Elastic Transcoder creates an output file from StartTime to the end of the file.

\n

If you specify a value longer than the duration of the input file, Elastic Transcoder transcodes \n the file and returns a warning message.

\n " } }, "documentation": "\n

Settings that determine when a clip begins and how long it lasts.

\n " @@ -4708,7 +5423,85 @@ }, "documentation": "\n

Settings for one clip in a composition. All jobs in a playlist must have the same clip settings.

\n " }, - "documentation": "\n

You can create an output file that contains an excerpt from the input file. This excerpt, called a clip, can come from the beginning, middle, or end of the file. The Composition object contains settings for the clips that make up an output file. For the current release, you can only specify settings for a single clip per output file. The Composition object cannot be null.

\n " + "documentation": "\n

You can create an output file that contains an excerpt from the input file. This \n excerpt, called a clip, can come from the beginning, middle, or end of the file. \n The Composition object contains settings for the clips that make up an output \n file. For the current release, you can only specify settings for a single clip \n per output file. The Composition object cannot be null.

\n " + }, + "Captions": { + "shape_name": "Captions", + "type": "structure", + "members": { + "MergePolicy": { + "shape_name": "CaptionMergePolicy", + "type": "string", + "pattern": "(^MergeOverride$)|(^MergeRetain$)|(^Override$)", + "documentation": "\n

A policy that determines how Elastic Transcoder handles the existence of multiple captions.

\n \n

MergePolicy cannot be null.

\n " + }, + "CaptionSources": { + "shape_name": "CaptionSources", + "type": "list", + "members": { + "shape_name": "CaptionSource", + "type": "structure", + "members": { + "Key": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\n

The name of the sidecar caption file that you want Elastic Transcoder to include in the\n output file.

\n " + }, + "Language": { + "shape_name": "Key", + "type": "string", + "min_length": 1, + "max_length": 255, + "documentation": "\n

A string that specifies the language of the caption. Specify this as one of:

\n \n

For more information on ISO language codes and language names, see the List of ISO \n 639-1 codes.

\n " + }, + "TimeOffset": { + "shape_name": "TimeOffset", + "type": "string", + "pattern": "(^[+-]?\\d{1,5}(\\.\\d{0,3})?$)|(^[+-]?([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)", + "documentation": "\n

For clip generation or captions that do not start at the same time\n as the associated video file, the TimeOffset tells Elastic Transcoder how much of the video to encode\n before including captions.

\n

Specify the TimeOffset in the form [+-]SS.sss or [+-]HH:mm:SS.ss.

\n " + }, + "Label": { + "shape_name": "Name", + "type": "string", + "min_length": 1, + "max_length": 40, + "documentation": "\n

The label of the caption shown in the player when choosing\n a language. We recommend that you put the caption language name here, \n in the language of the captions.

\n " + } + }, + "documentation": "\n

A source file for the input sidecar captions used during the transcoding\n process.

\n " + }, + "max_length": 20, + "documentation": "\n

Source files for the input sidecar captions used during the transcoding\n process. To omit all sidecar captions, leave CaptionSources blank.

\n " + }, + "CaptionFormats": { + "shape_name": "CaptionFormats", + "type": "list", + "members": { + "shape_name": "CaptionFormat", + "type": "structure", + "members": { + "Format": { + "shape_name": "CaptionFormatFormat", + "type": "string", + "pattern": "(^mov-text$)|(^cea-608$)|(^cea-708$)|(^srt$)|(^scc$)|(^webvtt$)|(^dfxp$)", + "documentation": "\n

The format you specify determines whether Elastic Transcoder generates an\n embedded or sidecar caption for this output.

\n \n " + }, + "Pattern": { + "shape_name": "CaptionFormatPattern", + "type": "string", + "pattern": "(^$)|(^.*\\{language\\}.*$)", + "documentation": "\n

The prefix for caption filenames, in the form description-{language}, where:

\n \n

If you don't include {language} in the file name pattern, Elastic Transcoder automatically\n appends \"{language}\" to the value that you specify for the description. In addition,\n Elastic Transcoder automatically appends the count to the end of the segment files.

\n

For example, suppose you're transcoding into srt format. When you enter\n \"Sydney-{language}-sunrise\", and the language of the captions is English\n (en), the name of the first caption file will be Sydney-en-sunrise00000.srt.

\n " + } + }, + "documentation": "\n

The file format of the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.

\n " + }, + "max_length": 4, + "documentation": "\n

The array of file formats for the output captions. If you leave this value blank,\n Elastic Transcoder returns an error.

\n " + } + }, + "documentation": "\n

You can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another.\n All captions must be in UTF-8. Elastic Transcoder supports two types of captions:

\n \n

If you want ttml or smpte-tt compatible captions, specify dfxp as your output format.

\n

Elastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures\n as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does\n not preserve text formatting (for example, italics) during the transcoding process.

\n

To remove captions or leave the captions empty, set Captions to null. To pass through\n existing captions unchanged, set the MergePolicy to MergeRetain,\n and pass in a null CaptionSources array.

\n

For more information on embedded files, see the Subtitles Wikipedia page.

\n

For more information on sidecar files, see the Extensible Metadata Platform and Sidecar file\n Wikipedia pages.

\n " } }, "documentation": "\n

Outputs recommended instead.If you specified one output for a job,\n information about that output. If you specified multiple outputs for a job, the\n Output object lists information about the first output. This duplicates\n the information that is listed for the first output in the Outputs\n object.

\n " @@ -4839,6 +5632,7 @@ "type": "string", "pattern": "^\\d{13}-\\w{6}$", "documentation": "\n

The identifier of the pipeline to read.

\n ", + "required": true, "location": "uri" } }, @@ -5099,6 +5893,7 @@ "type": "string", "pattern": "^\\d{13}-\\w{6}$", "documentation": "\n

The identifier of the preset for which you want to get detailed information.

\n ", + "required": true, "location": "uri" } }, @@ -5179,7 +5974,7 @@ "shape_name": "AudioCodecProfile", "type": "string", "pattern": "(^auto$)|(^AAC-LC$)|(^HE-AAC$)|(^HE-AACv2$)", - "documentation": "\n

If you specified AAC for Audio:Codec, choose the AAC profile for the output file.\n Elastic Transcoder supports the following profiles:

\n \n

If you created any presets before AAC profiles were added, Elastic Transcoder automatically updated\n your presets to use AAC-LC. You can change the value as required.

\n " + "documentation": "\n

You can only choose an audio profile when you specify AAC for the value of Audio:Codec.

\n

Specify the AAC profile for the output file. Elastic Transcoder supports the following profiles:

\n \n

If you created any presets before AAC profiles were added, Elastic Transcoder automatically updated\n your presets to use AAC-LC. You can change the value as required.

\n " } }, "documentation": "\n

If you specified AAC for Audio:Codec, this is the AAC \n compression profile to use. Valid values include:

\n

auto, AAC-LC, HE-AAC, HE-AACv2

\n

If you specify auto, Elastic Transcoder chooses a profile based on the bit rate of the output file.

\n " @@ -5306,13 +6101,13 @@ "MaxWidth": { "shape_name": "PixelsOrPercent", "type": "string", - "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{2,4}?px$)", + "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{1,4}?px$)", "documentation": "\n

The maximum width of the watermark in one of the following formats:

\n " }, "MaxHeight": { "shape_name": "PixelsOrPercent", "type": "string", - "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{2,4}?px$)", + "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{1,4}?px$)", "documentation": "\n

The maximum height of the watermark in one of the following formats:

If you specify the value in pixels, it must be less than or equal to the value of\n MaxHeight.

\n " }, "SizingPolicy": { @@ -5330,7 +6125,7 @@ "HorizontalOffset": { "shape_name": "PixelsOrPercent", "type": "string", - "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{2,4}?px$)", + "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{1,4}?px$)", "documentation": "\n

The amount by which you want the horizontal position of the watermark to be offset from\n the position specified by HorizontalAlign:

For example, if you specify Left for HorizontalAlign and 5px for\n HorizontalOffset, the left side of the watermark appears 5 pixels from\n the left border of the output video.

\n

HorizontalOffset is only valid when the value of\n HorizontalAlign is Left or Right. If you\n specify an offset that causes the watermark to extend beyond the left or right border\n and Elastic Transcoder has not added black bars, the watermark is cropped. If Elastic\n Transcoder has added black bars, the watermark extends into the black bars. If the\n watermark extends beyond the black bars, it is cropped.

\n

Use the value of Target to specify whether you want to include the black\n bars that are added by Elastic Transcoder, if any, in the offset calculation.

\n " }, "VerticalAlign": { @@ -5342,7 +6137,7 @@ "VerticalOffset": { "shape_name": "PixelsOrPercent", "type": "string", - "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{2,4}?px$)", + "pattern": "(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{1,4}?px$)", "documentation": "\n VerticalOffset\n

The amount by which you want the vertical position of the watermark to be offset from the\n position specified by VerticalAlign:

For example, if you specify Top for VerticalAlign and\n 5px for VerticalOffset, the top of the watermark appears 5\n pixels from the top border of the output video.

\n

VerticalOffset is only valid when the value of VerticalAlign is Top or\n Bottom.

\n

If you specify an offset that causes the watermark to extend beyond the top or bottom\n border and Elastic Transcoder has not added black bars, the watermark is cropped. If\n Elastic Transcoder has added black bars, the watermark extends into the black bars. If\n the watermark extends beyond the black bars, it is cropped.

\n\n

Use the value of Target to specify whether you want Elastic Transcoder to\n include the black bars that are added by Elastic Transcoder, if any, in the offset\n calculation.

\n " }, "Opacity": { @@ -5486,19 +6281,22 @@ "shape_name": "Role", "type": "string", "pattern": "^arn:aws:iam::\\w{12}:role/.+$", - "documentation": "\n

The IAM Amazon Resource Name (ARN) for the role that you want Elastic Transcoder to\n test.

\n " + "documentation": "\n

The IAM Amazon Resource Name (ARN) for the role that you want Elastic Transcoder to\n test.

\n ", + "required": true }, "InputBucket": { "shape_name": "BucketName", "type": "string", "pattern": "^(\\w|\\.|-){1,255}$", - "documentation": "\n

The Amazon S3 bucket that contains media files to be transcoded. The action attempts to read\n from this bucket.

\n " + "documentation": "\n

The Amazon S3 bucket that contains media files to be transcoded. The action attempts to read\n from this bucket.

\n ", + "required": true }, "OutputBucket": { "shape_name": "BucketName", "type": "string", "pattern": "^(\\w|\\.|-){1,255}$", - "documentation": "\n

The Amazon S3 bucket that Elastic Transcoder will write transcoded media files to. The action attempts to\n read from this bucket.

\n " + "documentation": "\n

The Amazon S3 bucket that Elastic Transcoder will write transcoded media files to. The action attempts to\n read from this bucket.

\n ", + "required": true }, "Topics": { "shape_name": "SnsTopics", @@ -5510,7 +6308,8 @@ "documentation": null }, "max_length": 30, - "documentation": "\n

The ARNs of one or more Amazon Simple Notification Service (Amazon SNS) topics that you want the action to send a test\n notification to.

\n " + "documentation": "\n

The ARNs of one or more Amazon Simple Notification Service (Amazon SNS) topics that you want the action to send a test\n notification to.

\n ", + "required": true } }, "documentation": "\n

The TestRoleRequest structure.

\n " @@ -6026,6 +6825,7 @@ "type": "string", "pattern": "^\\d{13}-\\w{6}$", "documentation": "\n

The identifier of the pipeline for which you want to change notification settings.

\n ", + "required": true, "location": "uri" }, "Notifications": { @@ -6057,7 +6857,8 @@ "documentation": "\n

The Amazon SNS topic that you want to notify when Elastic Transcoder encounters an error condition.

\n " } }, - "documentation": "\n

The topic ARN for the Amazon Simple Notification Service (Amazon SNS) topic that you want to notify to report job\n status.

\n To receive notifications, you must also subscribe to the new topic in the Amazon SNS\n console.\n \n " + "documentation": "\n

The topic ARN for the Amazon Simple Notification Service (Amazon SNS) topic that you want to notify to report job\n status.

\n To receive notifications, you must also subscribe to the new topic in the Amazon SNS\n console.\n \n ", + "required": true } }, "documentation": "\n

The UpdatePipelineNotificationsRequest structure.

\n " @@ -6324,13 +7125,15 @@ "type": "string", "pattern": "^\\d{13}-\\w{6}$", "documentation": "\n

The identifier of the pipeline to update.

\n ", + "required": true, "location": "uri" }, "Status": { "shape_name": "PipelineStatus", "type": "string", "pattern": "(^Active$)|(^Paused$)", - "documentation": "\n

The desired status of the pipeline:

\n \n " + "documentation": "\n

The desired status of the pipeline:

\n \n ", + "required": true } }, "documentation": "\n

The UpdatePipelineStatusRequest structure.

\n " diff --git a/services/emr.json b/services/emr.json index 1163d31c40..1e5333adcd 100644 --- a/services/emr.json +++ b/services/emr.json @@ -8,6 +8,7 @@ "service_abbreviation": "Amazon EMR", "timestamp_format": "unixTimestamp", "endpoint_prefix": "elasticmapreduce", + "xmlnamespace": "http://elasticmapreduce.amazonaws.com/doc/2009-03-31", "documentation": "\n

This is the Amazon Elastic MapReduce API Reference. This guide provides descriptions and\n samples of the Amazon Elastic MapReduce APIs.

\n\n

Amazon Elastic MapReduce (Amazon EMR) is a web service that makes it easy to process large amounts of\n data efficiently. Amazon EMR uses Hadoop processing combined with several AWS\n products to do tasks such as web indexing, data mining, log file analysis, machine\n learning, scientific simulation, and data warehousing.

\n\n ", "operations": { "AddInstanceGroups": { @@ -143,7 +144,7 @@ "pattern": "[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*", "min_length": 0, "max_length": 256, - "documentation": "\n

A string that uniquely identifies the job flow. This identifier is returned by\n RunJobFlow and can also be obtained from DescribeJobFlows.

\n ", + "documentation": "\n

A string that uniquely identifies the job flow. This identifier is returned by\n RunJobFlow and can also be obtained from ListClusters.

\n ", "required": true }, "Steps": { @@ -242,7 +243,7 @@ }, "documentation": "\n

Specification of a job flow step.

\n " }, - "documentation": "\n

A list of StepConfig to be executed by the job flow.

\n ", + "documentation": "\n

A list of StepConfig to be executed by the job flow.

\n ", "required": true } }, @@ -266,7 +267,7 @@ "documentation": "\n

The identifiers of the list of steps added to the job flow.

\n " } }, - "documentation": "\n

The output for the AddJobFlowSteps operation.

\n " + "documentation": "\n

The output for the AddJobFlowSteps operation.

\n " }, "errors": [ { @@ -288,7 +289,8 @@ "ResourceId": { "shape_name": "ResourceId", "type": "string", - "documentation": "\n

The Amazon EMR resource identifier to which tags will be added. This value must be a cluster identifier.

\n " + "documentation": "\n

The Amazon EMR resource identifier to which tags will be added. This value must be a cluster identifier.

\n ", + "required": true }, "Tags": { "shape_name": "TagList", @@ -300,27 +302,28 @@ "Key": { "shape_name": "String", "type": "string", - "documentation": "\n

A user-defined key, which is the minimum required information for a valid tag.\n For more information, see Tagging Amazon EMR Resources. \n

\n " + "documentation": "\n

A user-defined key, which is the minimum required information for a valid tag. \n For more information, see Tagging Amazon EMR Resources. \n

\n " }, "Value": { "shape_name": "String", "type": "string", - "documentation": "\n

A user-defined value, which is optional in a tag.\n For more information, see Tagging Amazon EMR Resources. \n

\n " + "documentation": "\n

A user-defined value, which is optional in a tag. \n For more information, see Tagging Amazon EMR Resources. \n

\n " } }, - "documentation": "\n

A key/value pair that contains user-defined metadata that you can associate with an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. \n For more information, see Tagging Amazon EMR Resources. \n

\n " + "documentation": "\n

A key/value pair containing user-defined metadata that you can associate with an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. \n For more information, see Tagging Amazon EMR Resources. \n&lt;/p&gt;

\n " }, - "documentation": "\n

A list of tags to associate with a cluster and propagate to Amazon EC2 instances. Tags are user-defined key/value pairs that consist of a required key string with a maximum of 128 characters, and an optional value string with a maximum of 256 characters.

\n " + "documentation": "\n

A list of tags to associate with a cluster and propagate to Amazon EC2 instances. Tags are user-defined key/value pairs that consist of a required key string with a maximum of 128 characters, and an optional value string with a maximum of 256 characters.

\n ", + "required": true } }, - "documentation": "\n

This input identifies a cluster and a list of tags to attach.\n

\n " + "documentation": "\n

This input identifies a cluster and a list of tags to attach. \n

\n " }, "output": { "shape_name": "AddTagsOutput", "type": "structure", "members": { }, - "documentation": "\n

This output indicates the result of adding tags to a resource. \n

\n " + "documentation": "\n

This output indicates the result of adding tags to a resource. \n

\n " }, "errors": [ { @@ -330,10 +333,10 @@ "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\n

The message associated with the exception.

\n \n " + "documentation": "\n

The message associated with the exception.

\n\n " } }, - "documentation": "\n

This exception occurs when there is an internal failure in the EMR service.

\n \n " + "documentation": "\n

This exception occurs when there is an internal failure in the EMR service.

\n\n " }, { "shape_name": "InvalidRequestException", @@ -344,18 +347,18 @@ "type": "string", "min_length": 1, "max_length": 256, - "documentation": "\n

The error code associated with the exception.

\n \n " + "documentation": "\n

The error code associated with the exception.

\n\n " }, "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\n

The message associated with the exception.

\n \n " + "documentation": "\n

The message associated with the exception.

\n\n " } }, - "documentation": "\n

This exception occurs when there is something wrong with user input.

\n \n " + "documentation": "\n

This exception occurs when there is something wrong with user input.

\n\n " } ], - "documentation": "\n

Adds tags to an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. \n For more information, see Tagging Amazon EMR Resources. \n

\n " + "documentation": "\n

Adds tags to an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. \n For more information, see Tagging Amazon EMR Resources. \n

\n \n POST / HTTP/1.1 \nContent-Type: application/x-amz-json-1.1 \nX-Amz-Target: ElasticMapReduce.AddTags \nAUTHPARAMS \n{ \n \"ResourceId\": \"j-3U7TSX5GZFD8Y\", \n \"Tags\": [{ \n \"Key\": \"stack\", \n \"Value\": \"Production\" \n }, \n { \n \"Key\": \"hbase\" \n }] \n} \n\n\n HTTP/1.1 200 OK \nx-amzn-RequestId: 9da5a349-ed9e-11e2-90db-69a5154aeb8d \nContent-Type: application/x-amz-json-1.1 \nContent-Length: 71 \nDate: Mon, 15 Jul 2013 22:33:47 GMT \n{ \n} \n \n \n " }, "DescribeCluster": { "name": "DescribeCluster", @@ -366,7 +369,8 @@ "ClusterId": { "shape_name": "ClusterId", "type": "string", - "documentation": "\n

The identifier of the cluster to describe.

\n " + "documentation": "\n

The identifier of the cluster to describe.

\n ", + "required": true } }, "documentation": "\n

This input determines which cluster to describe.

\n " @@ -483,7 +487,7 @@ "documentation": "\n

The IAM role that was specified when the job flow was launched. The EC2 instances of the job flow assume this role.

\n " } }, - "documentation": "\n

Provides information about the EC2 instances in a cluster grouped by category. For example, EC2 Key Name, Subnet Id, Instance Profile, and so on.

\n " + "documentation": "\n

Provides information about the EC2 instances in a cluster grouped by category. For example, key name, subnet ID, IAM instance profile, and so on.

\n " }, "LogUri": { "shape_name": "String", @@ -558,7 +562,7 @@ "documentation": "\n

This option is for advanced users only. This is meta information about third-party applications that third-party vendors use for testing purposes.

" } }, - "documentation": "\n

An application is any Amazon or third-party software that you can add to the cluster. This structure contains a list of strings that indicates the software to use with the cluster and accepts a user argument list. Amazon EMR accepts and forwards the argument list to the corresponding installation\n script as bootstrap action argument. For more information, see Launch a Job Flow on the MapR Distribution for Hadoop. Currently supported values are:

\n \n " + "documentation": "\n

An application is any Amazon or third-party software that you can add to the cluster. This structure contains a list of strings that indicates the software to use with the cluster and accepts a user argument list. Amazon EMR accepts and forwards the argument list to the corresponding installation\n script as bootstrap action argument. For more information, see Launch a Job Flow on the MapR Distribution for Hadoop. Currently supported values are:

\n \n " }, "documentation": "\n

The applications installed on this cluster.

\n " }, @@ -572,17 +576,22 @@ "Key": { "shape_name": "String", "type": "string", - "documentation": "\n

A user-defined key, which is the minimum required information for a valid tag.\n For more information, see Tagging Amazon EMR Resources. \n

\n " + "documentation": "\n

A user-defined key, which is the minimum required information for a valid tag. \n For more information, see Tagging Amazon EMR Resources. \n

\n " }, "Value": { "shape_name": "String", "type": "string", - "documentation": "\n

A user-defined value, which is optional in a tag.\n For more information, see Tagging Amazon EMR Resources. \n

\n " + "documentation": "\n

A user-defined value, which is optional in a tag. \n For more information, see Tagging Amazon EMR Resources. \n

\n " } }, - "documentation": "\n

A key/value pair that contains user-defined metadata that you can associate with an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. \n For more information, see Tagging Amazon EMR Resources. \n

\n " + "documentation": "\n

A key/value pair containing user-defined metadata that you can associate with an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. \n For more information, see Tagging Amazon EMR Resources. \n&lt;/p&gt;

\n " }, - "documentation": "\n

A list of tags associated with cluster.

" + "documentation": "\n

A list of tags associated with a cluster.

" + }, + "ServiceRole": { + "shape_name": "String", + "type": "string", + "documentation": "\n

The IAM role that was specified when the job flow was launched. Amazon ElasticMapReduce will assume this role to work with AWS resources on your behalf.

\n " } }, "documentation": "\n

This output contains the details for the requested cluster.

\n " @@ -598,10 +607,10 @@ "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\n

The message associated with the exception.

\n \n " + "documentation": "\n

The message associated with the exception.

\n\n " } }, - "documentation": "\n

This exception occurs when there is an internal failure in the EMR service.

\n \n " + "documentation": "\n

This exception occurs when there is an internal failure in the EMR service.

\n\n " }, { "shape_name": "InvalidRequestException", @@ -612,15 +621,15 @@ "type": "string", "min_length": 1, "max_length": 256, - "documentation": "\n

The error code associated with the exception.

\n \n " + "documentation": "\n

The error code associated with the exception.

\n\n " }, "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\n

The message associated with the exception.

\n \n " + "documentation": "\n

The message associated with the exception.

\n\n " } }, - "documentation": "\n

This exception occurs when there is something wrong with user input.

\n \n " + "documentation": "\n

This exception occurs when there is something wrong with user input.

\n\n " } ], "documentation": "\n

Provides cluster-level details including status, hardware and software configuration, VPC settings, and so on. For information about the cluster steps, see ListSteps.

\n \n " @@ -1239,6 +1248,14 @@ "min_length": 0, "max_length": 10280, "documentation": "\n

The IAM role that was specified when the job flow was launched. The EC2 instances of the job flow assume this role.

\n " + }, + "ServiceRole": { + "shape_name": "XmlString", + "type": "string", + "pattern": "[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*", + "min_length": 0, + "max_length": 10280, + "documentation": "\n

The IAM role that was specified when the job flow was launched. Amazon ElasticMapReduce will assume this role to work with AWS resources on your behalf.

\n " } }, "documentation": "\n

A description of a job flow.

\n " @@ -1257,7 +1274,7 @@ "documentation": "\n

Indicates that an error occurred while processing the request and that the request was not\n completed.

\n " } ], - "documentation": "\n

DescribeJobFlows returns a list of job flows that match all of the supplied parameters.\n The parameters can include a list of job flow IDs, job flow states, and restrictions on job\n flow creation date and time.

\n

Regardless of supplied parameters, only job flows created within the last two months are\n returned.

\n

If no parameters are supplied, then job flows matching either of the following criteria\n are returned:

\n \n

Amazon Elastic MapReduce can return a maximum of 512 job flow descriptions.

\n \n POST / HTTP/1.1\nContent-Type: application/x-amz-json-1.1\nX-Amz-Target: ElasticMapReduce.DescribeJobFlows\nContent-Length: 62\nUser-Agent: aws-sdk-ruby/1.9.2 ruby/1.9.3 i386-mingw32\nHost: us-east-1.elasticmapreduce.amazonaws.com\nX-Amz-Date: 20130715T220330Z\nX-Amz-Content-Sha256: fce83af973f96f173512aca2845c56862b946feb1de0600326f1365b658a0e39\nAuthorization: AWS4-HMAC-SHA256 Credential=AKIAIOSFODNN7EXAMPLE/20130715/us-east-1/elasticmapreduce/aws4_request, SignedHeaders=content-length;content-type;host;user-agent;x-amz-content-sha256;x-amz-date;x-amz-target, Signature=29F98a6f44e05ad54fe1e8b3d1a7101ab08dc3ad348995f89c533693cee2bb3b\nAccept: */*\n\n{\n \"JobFlowIds\": [\"j-ZKIY4CKQRX72\"],\n \"DescriptionType\": \"EXTENDED\"\n}\n\n\n HTTP/1.1 200 OK\nx-amzn-RequestId: 634d4142-ed9a-11e2-bbba-b56d7d016ec4\nContent-Type: application/x-amz-json-1.1\nContent-Length: 1624\nDate: Mon, 15 Jul 2013 22:03:31 GMT\n\n{\"JobFlows\": [{\n \"AmiVersion\": \"2.3.6\",\n \"BootstrapActions\": [],\n \"ExecutionStatusDetail\": {\n \"CreationDateTime\": 1.373923429E9,\n \"EndDateTime\": 1.373923995E9,\n \"LastStateChangeReason\": \"Steps completed\",\n \"ReadyDateTime\": 1.373923754E9,\n \"StartDateTime\": 1.373923754E9,\n \"State\": \"COMPLETED\"\n },\n \"Instances\": {\n \"HadoopVersion\": \"1.0.3\",\n \"InstanceCount\": 1,\n \"InstanceGroups\": [{\n \"CreationDateTime\": 1.373923429E9,\n \"EndDateTime\": 1.373923995E9,\n \"InstanceGroupId\": \"ig-3SRUWV3E0NB7K\",\n \"InstanceRequestCount\": 1,\n \"InstanceRole\": \"MASTER\",\n \"InstanceRunningCount\": 0,\n \"InstanceType\": \"m1.small\",\n \"LastStateChangeReason\": \"Job flow terminated\",\n \"Market\": \"ON_DEMAND\",\n \"Name\": \"Master InstanceGroup\",\n \"ReadyDateTime\": 1.37392375E9,\n \"StartDateTime\": 1.373923646E9,\n \"State\": \"ENDED\"\n }],\n \"KeepJobFlowAliveWhenNoSteps\": false,\n \"MasterInstanceId\": \"i-8c4fbbef\",\n \"MasterInstanceType\": \"m1.small\",\n \"MasterPublicDnsName\": 
\"ec2-107-20-46-140.compute-1.amazonaws.com\",\n \"NormalizedInstanceHours\": 1,\n \"Placement\": {\"AvailabilityZone\": \"us-east-1a\"},\n \"TerminationProtected\": false\n },\n \"JobFlowId\": \"j-ZKIY4CKQRX72\",\n \"Name\": \"Development Job Flow\",\n \"Steps\": [{\n \"ExecutionStatusDetail\": {\n \"CreationDateTime\": 1.373923429E9,\n \"EndDateTime\": 1.373923914E9,\n \"StartDateTime\": 1.373923754E9,\n \"State\": \"COMPLETED\"\n },\n \"StepConfig\": {\n \"ActionOnFailure\": \"CANCEL_AND_WAIT\",\n \"HadoopJarStep\": {\n \"Args\": [\n \"-input\",\n \"s3://elasticmapreduce/samples/wordcount/input\",\n \"-output\",\n \"s3://examples-bucket/example-output\",\n \"-mapper\",\n \"s3://elasticmapreduce/samples/wordcount/wordSplitter.py\",\n \"-reducer\",\n \"aggregate\"\n ],\n \"Jar\": \"/home/hadoop/contrib/streaming/hadoop-streaming.jar\",\n \"Properties\": []\n },\n \"Name\": \"Example Streaming Step\"\n }\n }],\n \"SupportedProducts\": [],\n \"VisibleToAllUsers\": false\n}]}\n\n \n " + "documentation": "\n

This API is deprecated and will eventually be removed. We recommend you use ListClusters,\n DescribeCluster, ListSteps, ListInstanceGroups and ListBootstrapActions\n instead.

\n

DescribeJobFlows returns a list of job flows that match all of the supplied parameters.\n The parameters can include a list of job flow IDs, job flow states, and restrictions on job\n flow creation date and time.

\n

Regardless of supplied parameters, only job flows created within the last two months are\n returned.

\n

If no parameters are supplied, then job flows matching either of the following criteria\n are returned:

\n \n

Amazon Elastic MapReduce can return a maximum of 512 job flow descriptions.

\n \n POST / HTTP/1.1\nContent-Type: application/x-amz-json-1.1\nX-Amz-Target: ElasticMapReduce.DescribeJobFlows\nContent-Length: 62\nUser-Agent: aws-sdk-ruby/1.9.2 ruby/1.9.3 i386-mingw32\nHost: us-east-1.elasticmapreduce.amazonaws.com\nX-Amz-Date: 20130715T220330Z\nX-Amz-Content-Sha256: fce83af973f96f173512aca2845c56862b946feb1de0600326f1365b658a0e39\nAuthorization: AWS4-HMAC-SHA256 Credential=AKIAIOSFODNN7EXAMPLE/20130715/us-east-1/elasticmapreduce/aws4_request, SignedHeaders=content-length;content-type;host;user-agent;x-amz-content-sha256;x-amz-date;x-amz-target, Signature=29F98a6f44e05ad54fe1e8b3d1a7101ab08dc3ad348995f89c533693cee2bb3b\nAccept: */*\n\n{\n \"JobFlowIds\": [\"j-ZKIY4CKQRX72\"],\n \"DescriptionType\": \"EXTENDED\"\n}\n\n\n HTTP/1.1 200 OK\nx-amzn-RequestId: 634d4142-ed9a-11e2-bbba-b56d7d016ec4\nContent-Type: application/x-amz-json-1.1\nContent-Length: 1624\nDate: Mon, 15 Jul 2013 22:03:31 GMT\n\n{\"JobFlows\": [{\n \"AmiVersion\": \"2.3.6\",\n \"BootstrapActions\": [],\n \"ExecutionStatusDetail\": {\n \"CreationDateTime\": 1.373923429E9,\n \"EndDateTime\": 1.373923995E9,\n \"LastStateChangeReason\": \"Steps completed\",\n \"ReadyDateTime\": 1.373923754E9,\n \"StartDateTime\": 1.373923754E9,\n \"State\": \"COMPLETED\"\n },\n \"Instances\": {\n \"HadoopVersion\": \"1.0.3\",\n \"InstanceCount\": 1,\n \"InstanceGroups\": [{\n \"CreationDateTime\": 1.373923429E9,\n \"EndDateTime\": 1.373923995E9,\n \"InstanceGroupId\": \"ig-3SRUWV3E0NB7K\",\n \"InstanceRequestCount\": 1,\n \"InstanceRole\": \"MASTER\",\n \"InstanceRunningCount\": 0,\n \"InstanceType\": \"m1.small\",\n \"LastStateChangeReason\": \"Job flow terminated\",\n \"Market\": \"ON_DEMAND\",\n \"Name\": \"Master InstanceGroup\",\n \"ReadyDateTime\": 1.37392375E9,\n \"StartDateTime\": 1.373923646E9,\n \"State\": \"ENDED\"\n }],\n \"KeepJobFlowAliveWhenNoSteps\": false,\n \"MasterInstanceId\": \"i-8c4fbbef\",\n \"MasterInstanceType\": \"m1.small\",\n \"MasterPublicDnsName\": 
\"ec2-107-20-46-140.compute-1.amazonaws.com\",\n \"NormalizedInstanceHours\": 1,\n \"Placement\": {\"AvailabilityZone\": \"us-east-1a\"},\n \"TerminationProtected\": false\n },\n \"JobFlowId\": \"j-ZKIY4CKQRX72\",\n \"Name\": \"Development Job Flow\",\n \"Steps\": [{\n \"ExecutionStatusDetail\": {\n \"CreationDateTime\": 1.373923429E9,\n \"EndDateTime\": 1.373923914E9,\n \"StartDateTime\": 1.373923754E9,\n \"State\": \"COMPLETED\"\n },\n \"StepConfig\": {\n \"ActionOnFailure\": \"CANCEL_AND_WAIT\",\n \"HadoopJarStep\": {\n \"Args\": [\n \"-input\",\n \"s3://elasticmapreduce/samples/wordcount/input\",\n \"-output\",\n \"s3://examples-bucket/example-output\",\n \"-mapper\",\n \"s3://elasticmapreduce/samples/wordcount/wordSplitter.py\",\n \"-reducer\",\n \"aggregate\"\n ],\n \"Jar\": \"/home/hadoop/contrib/streaming/hadoop-streaming.jar\",\n \"Properties\": []\n },\n \"Name\": \"Example Streaming Step\"\n }\n }],\n \"SupportedProducts\": [],\n \"VisibleToAllUsers\": false\n}]}\n\n \n " }, "DescribeStep": { "name": "DescribeStep", @@ -1268,12 +1285,14 @@ "ClusterId": { "shape_name": "ClusterId", "type": "string", - "documentation": "\n

The identifier of the cluster with steps to describe.

\n " + "documentation": "\n

The identifier of the cluster with steps to describe.

\n ", + "required": true }, "StepId": { "shape_name": "StepId", "type": "string", - "documentation": "\n

The identifier of the step to describe.

\n " + "documentation": "\n

The identifier of the step to describe.

\n ", + "required": true } }, "documentation": "\n

This input determines which step to describe.

\n " @@ -1347,7 +1366,7 @@ "CANCEL_AND_WAIT", "CONTINUE" ], - "documentation": "\n

This specifies what action to take when the cluster step fails. TERMINATE_JOB_FLOW is deprecated, use TERMINATE_CLUSTER instead.\n

\n " + "documentation": "\n

This specifies what action to take when the cluster step fails. Possible values are TERMINATE_CLUSTER, CANCEL_AND_WAIT, and CONTINUE.\n

\n " }, "Status": { "shape_name": "StepStatus", @@ -1398,12 +1417,12 @@ "StartDateTime": { "shape_name": "Date", "type": "timestamp", - "documentation": "\n

The date and time when the cluster step execution started.\n Due to delays in step status reporting, this can display a time which pre-dates a previous call to DescribeStep that indicated the step was not yet running. \n

\n " + "documentation": "\n

The date and time when the cluster step execution started.\n

\n " }, "EndDateTime": { "shape_name": "Date", "type": "timestamp", - "documentation": "\n

The date and time when the cluster step execution completed or failed. \n This can display a time that pre-dates a call to DescribeStep that indicates the step is running, due to delays in step status reporting. \n

\n " + "documentation": "\n

The date and time when the cluster step execution completed or failed.\n

\n " } }, "documentation": "\n

The timeline of the cluster step status over time.\n

\n " @@ -1425,10 +1444,10 @@ "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\n

The message associated with the exception.

\n \n " + "documentation": "\n

The message associated with the exception.

\n\n " } }, - "documentation": "\n

This exception occurs when there is an internal failure in the EMR service.

\n \n " + "documentation": "\n

This exception occurs when there is an internal failure in the EMR service.

\n\n " }, { "shape_name": "InvalidRequestException", @@ -1439,15 +1458,15 @@ "type": "string", "min_length": 1, "max_length": 256, - "documentation": "\n

The error code associated with the exception.

\n \n " + "documentation": "\n

The error code associated with the exception.

\n\n " }, "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\n

The message associated with the exception.

\n \n " + "documentation": "\n

The message associated with the exception.

\n\n " } }, - "documentation": "\n

This exception occurs when there is something wrong with user input.

\n \n " + "documentation": "\n

This exception occurs when there is something wrong with user input.

\n\n " } ], "documentation": "\n

Provides more detail about the cluster step.

\n " @@ -1461,12 +1480,13 @@ "ClusterId": { "shape_name": "ClusterId", "type": "string", - "documentation": "\n

The cluster identifier for the bootstrap actions to list.

\n " + "documentation": "\n

The cluster identifier for the bootstrap actions to list\n .

\n ", + "required": true }, "Marker": { "shape_name": "Marker", "type": "string", - "documentation": "\n

The pagination token is a random string indicating whether there are more results to fetch. Provide the pagination token from earlier API calls to retrieve the next page of results. When the value is null, all results have been returned.

\n " + "documentation": "\n

The pagination token that indicates the next set of results to retrieve\n .

\n " } }, "documentation": "\n

This input determines which bootstrap actions to retrieve.

\n " @@ -1505,15 +1525,15 @@ }, "documentation": "\n

An entity describing an executable that runs on a cluster.

\n " }, - "documentation": "\n

The bootstrap actions associated with the cluster.

\n " + "documentation": "\n

The bootstrap actions associated with the cluster\n .

\n " }, "Marker": { "shape_name": "Marker", "type": "string", - "documentation": "\n

The pagination token is a random string indicating whether there are more results to fetch. Use the pagination token in later API calls to retrieve the next page of results. When the value is null, all results have been returned.

\n " + "documentation": "\n

The pagination token that indicates the next set of results to retrieve\n .

\n " } }, - "documentation": "\n

This output contains the bootstrap actions detail.

\n " + "documentation": "\n

This output contains the bootstrap actions detail\n .

\n " }, "errors": [ { @@ -1523,10 +1543,10 @@ "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\n

The message associated with the exception.

\n \n " + "documentation": "\n

The message associated with the exception.

\n\n " } }, - "documentation": "\n

This exception occurs when there is an internal failure in the EMR service.

\n \n " + "documentation": "\n

This exception occurs when there is an internal failure in the EMR service.

\n\n " }, { "shape_name": "InvalidRequestException", @@ -1537,15 +1557,15 @@ "type": "string", "min_length": 1, "max_length": 256, - "documentation": "\n

The error code associated with the exception.

\n \n " + "documentation": "\n

The error code associated with the exception.

\n\n " }, "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\n

The message associated with the exception.

\n \n " + "documentation": "\n

The message associated with the exception.

\n\n " } }, - "documentation": "\n

This exception occurs when there is something wrong with user input.

\n \n " + "documentation": "\n

This exception occurs when there is something wrong with user input.

\n\n " } ], "documentation": "\n

Provides information about the bootstrap actions associated with a cluster.

\n \n " @@ -1559,12 +1579,12 @@ "CreatedAfter": { "shape_name": "Date", "type": "timestamp", - "documentation": "\n

The creation date and time beginning value filter for listing clusters.

\n " + "documentation": "\n

The creation date and time beginning value filter for listing clusters\n .

\n " }, "CreatedBefore": { "shape_name": "Date", "type": "timestamp", - "documentation": "\n

The creation date and time end value filter for listing clusters.

\n " + "documentation": "\n

The creation date and time end value filter for listing clusters\n .

\n " }, "ClusterStates": { "shape_name": "ClusterStateList", @@ -1588,7 +1608,7 @@ "Marker": { "shape_name": "Marker", "type": "string", - "documentation": "\n

The pagination token is a random string indicating whether there are more results to fetch. Provide the pagination token from earlier API calls to retrieve the next page of results. When the value is null, all results have been returned.\n

\n " + "documentation": "\n

The pagination token that indicates the next set of results to retrieve.\n

\n " } }, "documentation": "\n

This input determines how the ListClusters action filters the list of clusters that it returns.

\n " @@ -1691,7 +1711,7 @@ "Marker": { "shape_name": "Marker", "type": "string", - "documentation": "\n

The pagination token is a random string indicating whether there are more results to fetch. Use the pagination token in later API calls to retrieve the next page of results. When the value is null, all results have been returned. \n

\n " + "documentation": "\n

The pagination token that indicates the next set of results to retrieve.\n

\n " } }, "documentation": "\n

This contains a ClusterSummaryList with the cluster details; for example, the cluster IDs, names, and status.

\n " @@ -1704,10 +1724,10 @@ "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\n

The message associated with the exception.

\n \n " + "documentation": "\n

The message associated with the exception.

\n\n " } }, - "documentation": "\n

This exception occurs when there is an internal failure in the EMR service.

\n \n " + "documentation": "\n

This exception occurs when there is an internal failure in the EMR service.

\n\n " }, { "shape_name": "InvalidRequestException", @@ -1718,15 +1738,15 @@ "type": "string", "min_length": 1, "max_length": 256, - "documentation": "\n

The error code associated with the exception.

\n \n " + "documentation": "\n

The error code associated with the exception.

\n\n " }, "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\n

The message associated with the exception.

\n \n " + "documentation": "\n

The message associated with the exception.

\n\n " } }, - "documentation": "\n

This exception occurs when there is something wrong with user input.

\n \n " + "documentation": "\n

This exception occurs when there is something wrong with user input.

\n\n " } ], "documentation": "\n

Provides the status of all clusters visible to this AWS account. Allows you to filter the list of clusters based on certain criteria; for example, filtering by cluster creation date and time or by status.\n This call returns a maximum of 50 clusters per call, but returns a marker to track the paging of the cluster list across multiple ListClusters calls. \n

\n \n " @@ -1740,12 +1760,13 @@ "ClusterId": { "shape_name": "ClusterId", "type": "string", - "documentation": "\n

The identifier of the cluster for which to list the instance groups.

\n " + "documentation": "\n

The identifier of the cluster for which to list the instance groups.

\n ", + "required": true }, "Marker": { "shape_name": "Marker", "type": "string", - "documentation": "\n

The pagination token is a random string indicating whether there are more results to fetch. Provide the pagination token from earlier API calls to retrieve the next page of results. When the value is null, all results have been returned.

\n " + "documentation": "\n

The pagination token that indicates the next set of results to retrieve.

\n " } }, "documentation": "\n

This input determines which instance groups to retrieve.

\n " @@ -1832,7 +1853,7 @@ "SHUTTING_DOWN", "ENDED" ], - "documentation": "\n

The current state of the instance group. The following values are deprecated: ARRESTED, SHUTTING_DOWN, and ENDED. Use SUSPENDED, TERMINATING, and TERMINATED instead, respectively.\n

\n " + "documentation": "\n

The current state of the instance group.

\n " }, "StateChangeReason": { "shape_name": "InstanceGroupStateChangeReason", @@ -1890,7 +1911,7 @@ "Marker": { "shape_name": "Marker", "type": "string", - "documentation": "\n

The pagination token is a random string indicating whether there are more results to fetch. Use the pagination token in later API calls to retrieve the next page of results. When the value is null, all results have been returned.

\n " + "documentation": "\n

The pagination token that indicates the next set of results to retrieve.

\n " } }, "documentation": "\n

This input determines which instance groups to retrieve.

\n " @@ -1903,10 +1924,10 @@ "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\n

The message associated with the exception.

\n \n " + "documentation": "\n

The message associated with the exception.

\n\n " } }, - "documentation": "\n

This exception occurs when there is an internal failure in the EMR service.

\n \n " + "documentation": "\n

This exception occurs when there is an internal failure in the EMR service.

\n\n " }, { "shape_name": "InvalidRequestException", @@ -1917,15 +1938,15 @@ "type": "string", "min_length": 1, "max_length": 256, - "documentation": "\n

The error code associated with the exception.

\n \n " + "documentation": "\n

The error code associated with the exception.

\n\n " }, "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\n

The message associated with the exception.

\n \n " + "documentation": "\n

The message associated with the exception.

\n\n " } }, - "documentation": "\n

This exception occurs when there is something wrong with user input.

\n \n " + "documentation": "\n

This exception occurs when there is something wrong with user input.

\n\n " } ], "documentation": "\n

Provides all available details about the instance groups in a cluster.

\n \n " @@ -1939,7 +1960,8 @@ "ClusterId": { "shape_name": "ClusterId", "type": "string", - "documentation": "\n

The identifier of the cluster for which to list the instances.

\n " + "documentation": "\n

The identifier of the cluster for which to list the instances.

\n ", + "required": true }, "InstanceGroupId": { "shape_name": "InstanceGroupId", @@ -1964,7 +1986,7 @@ "Marker": { "shape_name": "Marker", "type": "string", - "documentation": "\n

The pagination token is a random string indicating whether there are more results to fetch. Provide the pagination token from earlier API calls to retrieve the next page of results. When the value is null, all results have been returned.

\n " + "documentation": "\n

The pagination token that indicates the next set of results to retrieve.

\n " } }, "documentation": "\n

This input determines which instances to list.

\n " @@ -2083,7 +2105,7 @@ "Marker": { "shape_name": "Marker", "type": "string", - "documentation": "\n

The pagination token is a random string indicating whether there are more results to fetch. Use the pagination token in later API calls to retrieve the next page of results. When the value is null, all results have been returned.

\n " + "documentation": "\n

The pagination token that indicates the next set of results to retrieve.

\n " } }, "documentation": "\n

This output contains the list of instances.

\n " @@ -2096,10 +2118,10 @@ "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\n

The message associated with the exception.

\n \n " + "documentation": "\n

The message associated with the exception.

\n\n " } }, - "documentation": "\n

This exception occurs when there is an internal failure in the EMR service.

\n \n " + "documentation": "\n

This exception occurs when there is an internal failure in the EMR service.

\n\n " }, { "shape_name": "InvalidRequestException", @@ -2110,15 +2132,15 @@ "type": "string", "min_length": 1, "max_length": 256, - "documentation": "\n

The error code associated with the exception.

\n \n " + "documentation": "\n

The error code associated with the exception.

\n\n " }, "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\n

The message associated with the exception.

\n \n " + "documentation": "\n

The message associated with the exception.

\n\n " } }, - "documentation": "\n

This exception occurs when there is something wrong with user input.

\n \n " + "documentation": "\n

This exception occurs when there is something wrong with user input.

\n\n " } ], "documentation": "\n

Provides information about the cluster instances that Amazon EMR provisions on behalf of a user when it creates the cluster. \n For example, this operation indicates when the EC2 instances reach the Ready state, when instances become available to Amazon EMR to use for jobs, and the IP addresses for cluster instances, etc.\n

\n " @@ -2132,7 +2154,8 @@ "ClusterId": { "shape_name": "ClusterId", "type": "string", - "documentation": "\n

The identifier of the cluster for which to list the steps.

\n " + "documentation": "\n

The identifier of the cluster for which to list the steps.

\n ", + "required": true }, "StepStates": { "shape_name": "StepStateList", @@ -2155,7 +2178,7 @@ "Marker": { "shape_name": "Marker", "type": "string", - "documentation": "\n

The pagination token is a random string indicating whether there are more results to fetch. Provide the pagination token from earlier API calls to retrieve the next page of results. When the value is null, all results have been returned.

\n " + "documentation": "\n

The pagination token that indicates the next set of results to retrieve.

\n " } }, "documentation": "\n

This input determines which steps to list.

\n " @@ -2230,12 +2253,12 @@ "StartDateTime": { "shape_name": "Date", "type": "timestamp", - "documentation": "\n

The date and time when the cluster step execution started.\n Due to delays in step status reporting, this can display a time which pre-dates a previous call to DescribeStep that indicated the step was not yet running. \n

\n " + "documentation": "\n

The date and time when the cluster step execution started.\n

\n " }, "EndDateTime": { "shape_name": "Date", "type": "timestamp", - "documentation": "\n

The date and time when the cluster step execution completed or failed. \n This can display a time that pre-dates a call to DescribeStep that indicates the step is running, due to delays in step status reporting. \n

\n " + "documentation": "\n

The date and time when the cluster step execution completed or failed.\n

\n " } }, "documentation": "\n

The timeline of the cluster step status over time.\n

\n " @@ -2251,7 +2274,7 @@ "Marker": { "shape_name": "Marker", "type": "string", - "documentation": "\n

The pagination token is a random string indicating whether there are more results to fetch. Use the pagination token in later API calls to retrieve the next page of results. When the value is null, all results have been returned.

\n " + "documentation": "\n

The pagination token that indicates the next set of results to retrieve.

\n " } }, "documentation": "\n

This output contains the list of steps.

\n " @@ -2264,10 +2287,10 @@ "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\n

The message associated with the exception.

\n \n " + "documentation": "\n

The message associated with the exception.

\n\n " } }, - "documentation": "\n

This exception occurs when there is an internal failure in the EMR service.

\n \n " + "documentation": "\n

This exception occurs when there is an internal failure in the EMR service.

\n\n " }, { "shape_name": "InvalidRequestException", @@ -2278,15 +2301,15 @@ "type": "string", "min_length": 1, "max_length": 256, - "documentation": "\n

The error code associated with the exception.

\n \n " + "documentation": "\n

The error code associated with the exception.

\n\n " }, "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\n

The message associated with the exception.

\n \n " + "documentation": "\n

The message associated with the exception.

\n\n " } }, - "documentation": "\n

This exception occurs when there is something wrong with user input.

\n \n " + "documentation": "\n

This exception occurs when there is something wrong with user input.

\n\n " } ], "documentation": "\n

Provides a list of steps for the cluster. \n

\n " @@ -2326,7 +2349,7 @@ "type": "string", "documentation": null }, - "documentation": "\n

The EC2 InstanceIds to terminate. For advanced users only. \n Once you terminate the instances, the instance group will not return to its original requested size.

\n " + "documentation": "\n

The EC2 InstanceIds to terminate. For advanced users only. \n Once you terminate the instances, the instance group will not return to its original requested size.

\n " } }, "documentation": "\n

Modify an instance group size.

\n " @@ -2357,7 +2380,8 @@ "ResourceId": { "shape_name": "ResourceId", "type": "string", - "documentation": "\n

The Amazon EMR resource identifier from which tags will be removed. This value must be a cluster identifier.

\n " + "documentation": "\n

The Amazon EMR resource identifier from which tags will be removed. This value must be a cluster identifier.

\n ", + "required": true }, "TagKeys": { "shape_name": "StringList", @@ -2367,17 +2391,18 @@ "type": "string", "documentation": null }, - "documentation": "\n

A list of tag keys to remove from a resource.

\n " + "documentation": "\n

A list of tag keys to remove from a resource.

\n ", + "required": true } }, - "documentation": "\n

This input identifies a cluster and a list of tags to remove. \n

\n " + "documentation": "\n

This input identifies a cluster and a list of tags to remove. \n

\n " }, "output": { "shape_name": "RemoveTagsOutput", "type": "structure", "members": { }, - "documentation": "\n

This output indicates the result of removing tags from a resource. \n

\n " + "documentation": "\n

This output indicates the result of removing tags from a resource. \n

\n " }, "errors": [ { @@ -2387,10 +2412,10 @@ "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\n

The message associated with the exception.

\n \n " + "documentation": "\n

The message associated with the exception.

\n\n " } }, - "documentation": "\n

This exception occurs when there is an internal failure in the EMR service.

\n \n " + "documentation": "\n

This exception occurs when there is an internal failure in the EMR service.

\n\n " }, { "shape_name": "InvalidRequestException", @@ -2401,18 +2426,18 @@ "type": "string", "min_length": 1, "max_length": 256, - "documentation": "\n

The error code associated with the exception.

\n \n " + "documentation": "\n

The error code associated with the exception.

\n\n " }, "Message": { "shape_name": "ErrorMessage", "type": "string", - "documentation": "\n

The message associated with the exception.

\n \n " + "documentation": "\n

The message associated with the exception.

\n\n " } }, - "documentation": "\n

This exception occurs when there is something wrong with user input.

\n \n " + "documentation": "\n

This exception occurs when there is something wrong with user input.

\n\n " } ], - "documentation": "\n

Removes tags from an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. \n For more information, see Tagging Amazon EMR Resources. \n

\n \n " + "documentation": "\n

Removes tags from an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. \n For more information, see Tagging Amazon EMR Resources. \n

\n

The following example removes the stack tag with value Prod from a cluster:

\n \n POST / HTTP/1.1 \nContent-Type: application/x-amz-json-1.1 \nX-Amz-Target: ElasticMapReduce.RemoveTags \nAUTHPARAMS \n{ \n \"ResourceId\": \"j-3U7TSX5GZFD8Y\", \n \"Tags\": [{ \n \"Key\": \"stack\", \n \"Value\": \"Prod\" \n }] \n} \n\n\n HTTP/1.1 200 OK \nx-amzn-RequestId: 9da5a349-ed9e-11e2-90db-69a5154aeb8d \nContent-Type: application/x-amz-json-1.1 \nContent-Length: 71 \nDate: Mon, 15 Jul 2013 22:33:47 GMT \n{ \n} \n \n

The following example removes the stack and hbase tags from a cluster:

\n POST / HTTP/1.1 \nContent-Type: application/x-amz-json-1.1 \nX-Amz-Target: ElasticMapReduce.RemoveTags \nAUTHPARAMS \n{ \n \"ResourceId\": \"j-3U7TSX5GZFD8Y\", \n \"Tags\": [{ \n \"Key\": \"stack\" \n }, \n { \n \"Key\": \"hbase\" \n }] \n} \n\n\n HTTP/1.1 200 OK \nx-amzn-RequestId: 9da5a349-ed9e-11e2-90db-69a5154aeb8d \nContent-Type: application/x-amz-json-1.1 \nContent-Length: 71 \nDate: Mon, 15 Jul 2013 22:33:47 GMT \n{ \n} \n \n
\n " }, "RunJobFlow": { "name": "RunJobFlow", @@ -2803,6 +2828,14 @@ "max_length": 10280, "documentation": "\n

An IAM role for the job flow. The EC2 instances of the job flow assume this role. The default role is EMRJobflowDefault. In order to use the default role, you must have already created it using the CLI.

\n " }, + "ServiceRole": { + "shape_name": "XmlString", + "type": "string", + "pattern": "[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*", + "min_length": 0, + "max_length": 10280, + "documentation": "\n

IAM role that Amazon ElasticMapReduce will assume to work with AWS resources on your behalf. You may set this parameter to the name of an existing IAM role.

\n " + }, "Tags": { "shape_name": "TagList", "type": "list", @@ -2813,15 +2846,15 @@ "Key": { "shape_name": "String", "type": "string", - "documentation": "\n

A user-defined key, which is the minimum required information for a valid tag.\n For more information, see Tagging Amazon EMR Resources. \n

\n " + "documentation": "\n

A user-defined key, which is the minimum required information for a valid tag. \n For more information, see Tagging Amazon EMR Resources. \n

\n " }, "Value": { "shape_name": "String", "type": "string", - "documentation": "\n

A user-defined value, which is optional in a tag.\n For more information, see Tagging Amazon EMR Resources. \n

\n " + "documentation": "\n

A user-defined value, which is optional in a tag. \n For more information, see Tagging Amazon EMR Resources. \n

\n " } }, - "documentation": "\n

A key/value pair that contains user-defined metadata that you can associate with an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. \n For more information, see Tagging Amazon EMR Resources. \n

\n " + "documentation": "\n

A key/value pair containing user-defined metadata that you can associate with an Amazon EMR resource. Tags make it easier to associate clusters in various ways, such as grouping clusters to track your Amazon EMR resource allocation costs. \n For more information, see Tagging Amazon EMR Resources. \n

\n " }, "documentation": "\n

A list of tags to associate with a cluster and propagate to Amazon EC2 instances.

\n " } @@ -2972,4 +3005,4 @@ "documentation": "\n

\n TerminateJobFlows shuts a list of job flows down. When a job flow is shut down, any step\n not yet completed is canceled and the EC2 instances on which the job flow is running are\n stopped. Any log files not already saved are uploaded to Amazon S3 if a LogUri was\n specified when the job flow was created. \n

\n

\n The call to TerminateJobFlows is asynchronous. Depending on the configuration of the job flow, \n it may take up to 5-20 minutes for the job flow to \n completely terminate and release allocated resources, such as Amazon EC2 instances.\n

\n \n \n POST / HTTP/1.1\nContent-Type: application/x-amz-json-1.1\nX-Amz-Target: ElasticMapReduce.TerminateJobFlows\nContent-Length: 33\nUser-Agent: aws-sdk-ruby/1.9.2 ruby/1.9.3 i386-mingw32\nHost: us-east-1.elasticmapreduce.amazonaws.com\nX-Amz-Date: 20130716T211858Z\nX-Amz-Content-Sha256: ab64713f61e066e80a6083844b9249b6c6362d34a7ae7393047aa46d38b9e315\nAuthorization: AWS4-HMAC-SHA256 Credential=AKIAIOSFODNN7EXAMPLE/20130716/us-east-1/elasticmapreduce/aws4_request, SignedHeaders=content-length;content-type;host;user-agent;x-amz-content-sha256;x-amz-date;x-amz-target, Signature=9791416eaf09f36aa753a324b0de27ff5cc7084b8548cc748487a2bcb3439d58\nAccept: */*\n\n{\"JobFlowIds\": [\"j-3TS0OIYO4NFN\"]}\n\n\n HTTP/1.1 200 OK\nx-amzn-RequestId: 5551a7c9-ee5d-11e2-9542-25296c300ff0\nContent-Type: application/x-amz-json-1.1\nContent-Length: 0\nDate: Tue, 16 Jul 2013 21:18:59 GMT\n \n \n " } } -} \ No newline at end of file +}